From 7a57bbd8c189fbf8c82f7e1239479e9eb82781b3 Mon Sep 17 00:00:00 2001 From: Michael Morello Date: Thu, 27 Apr 2023 11:37:39 +0200 Subject: [PATCH 01/26] Harden Elasticsearch SecurityContext (#6703) * In the init container: copy the Elasticsearch configuration, and then create links. * Set default security context for Elasticsearch containers, including the sidecars. * Add E2E check for securityContext. --- .../beat/common/stackmon/stackmon_test.go | 9 +++ .../common/defaults/pod_template.go | 16 +++++ .../common/keystore/initcontainer.go | 12 +++- pkg/controller/common/stackmon/sidecar.go | 17 +++-- pkg/controller/elasticsearch/driver/driver.go | 7 +- .../elasticsearch/initcontainer/prepare_fs.go | 24 +++---- .../initcontainer/prepare_fs_script.go | 27 ++++---- .../elasticsearch/nodespec/podspec.go | 14 ++++ .../elasticsearch/nodespec/podspec_test.go | 34 ++++++++++ .../elasticsearch/nodespec/volumes.go | 6 ++ .../securitycontext/securitycontext.go | 47 +++++++++++++ .../elasticsearch/stackmon/sidecar.go | 9 ++- .../elasticsearch/stackmon/sidecar_test.go | 38 ++++++++--- pkg/controller/elasticsearch/volume/names.go | 3 + .../kibana/stackmon/sidecar_test.go | 16 ++--- .../elasticsearch/check_securitycontext.go | 68 +++++++++++++++++++ test/e2e/test/elasticsearch/checks_k8s.go | 1 + 17 files changed, 295 insertions(+), 53 deletions(-) create mode 100644 pkg/controller/elasticsearch/securitycontext/securitycontext.go create mode 100644 test/e2e/test/elasticsearch/check_securitycontext.go diff --git a/pkg/controller/beat/common/stackmon/stackmon_test.go b/pkg/controller/beat/common/stackmon/stackmon_test.go index 6308b1971e..969e0020d1 100644 --- a/pkg/controller/beat/common/stackmon/stackmon_test.go +++ b/pkg/controller/beat/common/stackmon/stackmon_test.go @@ -76,6 +76,11 @@ func TestMetricBeat(t *testing.T) { ReadOnly: true, MountPath: "/etc/metricbeat-config", }, + { + Name: "metricbeat-data", + ReadOnly: false, + MountPath: "/usr/share/metricbeat/data", + }, { Name: "shared-data", ReadOnly: false, @@ -129,6 +134,10 @@ output: Name: "beat-metricbeat-config", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: "beat-beat-monitoring-metricbeat-config", Optional: pointer.Bool(false)}}, }, + { + Name: "metricbeat-data", + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + }, { Name: "shared-data", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, diff --git a/pkg/controller/common/defaults/pod_template.go b/pkg/controller/common/defaults/pod_template.go index 81cfea9331..ffd842f5ec 100644 --- a/pkg/controller/common/defaults/pod_template.go +++ b/pkg/controller/common/defaults/pod_template.go @@ -343,6 +343,22 @@ func (b *PodTemplateBuilder) WithPodSecurityContext(securityContext corev1.PodSe return b } +// WithContainersSecurityContext sets Containers and InitContainers SecurityContext. +// Must be called once all the Containers and InitContainers have been set. 
+func (b *PodTemplateBuilder) WithContainersSecurityContext(securityContext corev1.SecurityContext) *PodTemplateBuilder { + for i := range b.PodTemplate.Spec.Containers { + if b.PodTemplate.Spec.Containers[i].SecurityContext == nil { + b.PodTemplate.Spec.Containers[i].SecurityContext = securityContext.DeepCopy() + } + } + for i := range b.PodTemplate.Spec.InitContainers { + if b.PodTemplate.Spec.InitContainers[i].SecurityContext == nil { + b.PodTemplate.Spec.InitContainers[i].SecurityContext = securityContext.DeepCopy() + } + } + return b +} + func (b *PodTemplateBuilder) WithAutomountServiceAccountToken() *PodTemplateBuilder { if b.PodTemplate.Spec.AutomountServiceAccountToken == nil { t := true diff --git a/pkg/controller/common/keystore/initcontainer.go b/pkg/controller/common/keystore/initcontainer.go index 6c1df54f70..d3e7aa13be 100644 --- a/pkg/controller/common/keystore/initcontainer.go +++ b/pkg/controller/common/keystore/initcontainer.go @@ -32,6 +32,8 @@ type InitContainerParameters struct { // SkipInitializedFlag when true do not use a flag to ensure the keystore is created only once. This should only be set // to true if the keystore can be forcibly recreated. SkipInitializedFlag bool + // SecurityContext is the security context applied to the keystore container. + SecurityContext *corev1.SecurityContext } // script is a small bash script to create an Elastic Stack keystore, @@ -84,7 +86,7 @@ func initContainer( return corev1.Container{}, err } - return corev1.Container{ + container := corev1.Container{ // Image will be inherited from pod template defaults ImagePullPolicy: corev1.PullIfNotPresent, Name: InitContainerName, @@ -97,5 +99,11 @@ func initContainer( secureSettingsSecret.VolumeMount(), }, Resources: parameters.Resources, - }, nil + } + + if parameters.SecurityContext != nil { + container.SecurityContext = parameters.SecurityContext + } + + return container, nil } diff --git a/pkg/controller/common/stackmon/sidecar.go b/pkg/controller/common/stackmon/sidecar.go index f7fd672f73..f83b050832 100644 --- a/pkg/controller/common/stackmon/sidecar.go +++ b/pkg/controller/common/stackmon/sidecar.go @@ -47,12 +47,17 @@ func NewMetricBeatSidecar( return BeatSidecar{}, err } image := container.ImageRepository(container.MetricbeatImage, version) - return NewBeatSidecar(ctx, client, "metricbeat", image, resource, monitoring.GetMetricsAssociation(resource), baseConfig, sourceCaVolume) + + // EmptyDir volume so that MetricBeat does not write in the container image, which allows ReadOnlyRootFilesystem: true + emptyDir := volume.NewEmptyDirVolume("metricbeat-data", "/usr/share/metricbeat/data") + return NewBeatSidecar(ctx, client, "metricbeat", image, resource, monitoring.GetMetricsAssociation(resource), baseConfig, sourceCaVolume, emptyDir) } func NewFileBeatSidecar(ctx context.Context, client k8s.Client, resource monitoring.HasMonitoring, version string, baseConfig string, additionalVolume volume.VolumeLike) (BeatSidecar, error) { image := container.ImageRepository(container.FilebeatImage, version) - return NewBeatSidecar(ctx, client, "filebeat", image, resource, monitoring.GetLogsAssociation(resource), baseConfig, additionalVolume) + // EmptyDir volume so that FileBeat does not write in the container image, which allows ReadOnlyRootFilesystem: true + emptyDir := volume.NewEmptyDirVolume("filebeat-data", "/usr/share/filebeat/data") + return NewBeatSidecar(ctx, client, "filebeat", image, resource, monitoring.GetLogsAssociation(resource), baseConfig, additionalVolume, emptyDir) } // 
BeatSidecar helps with building a beat sidecar container to monitor an Elastic Stack application. It focuses on @@ -65,7 +70,7 @@ type BeatSidecar struct { } func NewBeatSidecar(ctx context.Context, client k8s.Client, beatName string, image string, resource monitoring.HasMonitoring, - associations []commonv1.Association, baseConfig string, additionalVolume volume.VolumeLike, + associations []commonv1.Association, baseConfig string, additionalVolumes ...volume.VolumeLike, ) (BeatSidecar, error) { // build the beat config config, err := newBeatConfig(ctx, client, beatName, resource, associations, baseConfig) @@ -75,8 +80,10 @@ func NewBeatSidecar(ctx context.Context, client k8s.Client, beatName string, ima // add additional volume (ex: CA volume of the monitored ES for Metricbeat) volumes := config.volumes - if additionalVolume != nil { - volumes = append(volumes, additionalVolume) + for _, additionalVolume := range additionalVolumes { + if additionalVolume != nil { + volumes = append(volumes, additionalVolume) + } } // prepare the volume mounts for the beat container from all provided volumes diff --git a/pkg/controller/elasticsearch/driver/driver.go b/pkg/controller/elasticsearch/driver/driver.go index 887c043fe5..b3198ee78a 100644 --- a/pkg/controller/elasticsearch/driver/driver.go +++ b/pkg/controller/elasticsearch/driver/driver.go @@ -42,6 +42,7 @@ import ( "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/observer" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/reconcile" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/remotecluster" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/securitycontext" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/services" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/settings" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/stackmon" @@ -322,6 +323,10 @@ func (d *defaultDriver) Reconcile(ctx context.Context) *reconciler.Results { } } + keystoreParams := initcontainer.KeystoreParams + keystoreSecurityContext := securitycontext.For(d.Version, true) + keystoreParams.SecurityContext = &keystoreSecurityContext + // setup a keystore with secure settings in an init container, if specified by the user keystoreResources, err := keystore.ReconcileResources( ctx, @@ -329,7 +334,7 @@ func (d *defaultDriver) Reconcile(ctx context.Context) *reconciler.Results { &d.ES, esv1.ESNamer, label.NewLabels(k8s.ExtractNamespacedName(&d.ES)), - initcontainer.KeystoreParams, + keystoreParams, ) if err != nil { return results.WithError(err) diff --git a/pkg/controller/elasticsearch/initcontainer/prepare_fs.go b/pkg/controller/elasticsearch/initcontainer/prepare_fs.go index c17235028a..f7707c941b 100644 --- a/pkg/controller/elasticsearch/initcontainer/prepare_fs.go +++ b/pkg/controller/elasticsearch/initcontainer/prepare_fs.go @@ -60,27 +60,27 @@ var ( Array: []LinkedFile{ { Source: stringsutil.Concat(esvolume.XPackFileRealmVolumeMountPath, "/", filerealm.UsersFile), - Target: stringsutil.Concat(EsConfigSharedVolume.ContainerMountPath, "/", filerealm.UsersFile), + Target: stringsutil.Concat(EsConfigSharedVolume.InitContainerMountPath, "/", filerealm.UsersFile), }, { Source: stringsutil.Concat(esvolume.XPackFileRealmVolumeMountPath, "/", user.RolesFile), - Target: stringsutil.Concat(EsConfigSharedVolume.ContainerMountPath, "/", user.RolesFile), + Target: stringsutil.Concat(EsConfigSharedVolume.InitContainerMountPath, "/", user.RolesFile), }, { Source: 
stringsutil.Concat(esvolume.XPackFileRealmVolumeMountPath, "/", filerealm.UsersRolesFile), - Target: stringsutil.Concat(EsConfigSharedVolume.ContainerMountPath, "/", filerealm.UsersRolesFile), + Target: stringsutil.Concat(EsConfigSharedVolume.InitContainerMountPath, "/", filerealm.UsersRolesFile), }, { Source: stringsutil.Concat(settings.ConfigVolumeMountPath, "/", settings.ConfigFileName), - Target: stringsutil.Concat(EsConfigSharedVolume.ContainerMountPath, "/", settings.ConfigFileName), + Target: stringsutil.Concat(EsConfigSharedVolume.InitContainerMountPath, "/", settings.ConfigFileName), }, { Source: stringsutil.Concat(esvolume.UnicastHostsVolumeMountPath, "/", esvolume.UnicastHostsFile), - Target: stringsutil.Concat(EsConfigSharedVolume.ContainerMountPath, "/", esvolume.UnicastHostsFile), + Target: stringsutil.Concat(EsConfigSharedVolume.InitContainerMountPath, "/", esvolume.UnicastHostsFile), }, { Source: stringsutil.Concat(esvolume.XPackFileRealmVolumeMountPath, "/", esvolume.ServiceAccountsFile), - Target: stringsutil.Concat(EsConfigSharedVolume.ContainerMountPath, "/", esvolume.ServiceAccountsFile), + Target: stringsutil.Concat(EsConfigSharedVolume.InitContainerMountPath, "/", esvolume.ServiceAccountsFile), }, }, } @@ -109,7 +109,6 @@ func NewPrepareFSInitContainer(transportCertificatesVolume volume.SecretVolume, certificatesVolumeMount := transportCertificatesVolume.VolumeMount() certificatesVolumeMount.MountPath = initContainerTransportCertificatesVolumeMountPath - privileged := false volumeMounts := append( // we will also inherit all volume mounts from the main container later on in the pod template builder PluginVolumes.InitContainerVolumeMounts(), @@ -125,13 +124,10 @@ func NewPrepareFSInitContainer(transportCertificatesVolume volume.SecretVolume, container := corev1.Container{ ImagePullPolicy: corev1.PullIfNotPresent, Name: PrepareFilesystemContainerName, - SecurityContext: &corev1.SecurityContext{ - Privileged: &privileged, - }, - Env: defaults.PodDownwardEnvVars(), - Command: []string{"bash", "-c", path.Join(esvolume.ScriptsVolumeMountPath, PrepareFsScriptConfigKey)}, - VolumeMounts: volumeMounts, - Resources: defaultResources, + Env: defaults.PodDownwardEnvVars(), + Command: []string{"bash", "-c", path.Join(esvolume.ScriptsVolumeMountPath, PrepareFsScriptConfigKey)}, + VolumeMounts: volumeMounts, + Resources: defaultResources, } return container, nil diff --git a/pkg/controller/elasticsearch/initcontainer/prepare_fs_script.go b/pkg/controller/elasticsearch/initcontainer/prepare_fs_script.go index 182fa04140..54f06a3358 100644 --- a/pkg/controller/elasticsearch/initcontainer/prepare_fs_script.go +++ b/pkg/controller/elasticsearch/initcontainer/prepare_fs_script.go @@ -95,20 +95,6 @@ var scriptTemplate = template.Must(template.New("").Parse( echo "Starting init script" - ###################### - # Config linking # - ###################### - - # Link individual files from their mount location into the config dir - # to a volume, to be used by the ES container - ln_start=$(date +%s) - {{range .LinkedFiles.Array}} - echo "Linking {{.Source}} to {{.Target}}" - ln -sf {{.Source}} {{.Target}} - {{end}} - echo "File linking duration: $(duration $ln_start) sec." - - ###################### # Files persistence # ###################### @@ -127,6 +113,19 @@ var scriptTemplate = template.Must(template.New("").Parse( {{end}} echo "Files copy duration: $(duration $mv_start) sec." 
+ ###################### + # Config linking # + ###################### + + # Link individual files from their mount location into the config dir + # to a volume, to be used by the ES container + ln_start=$(date +%s) + {{range .LinkedFiles.Array}} + echo "Linking {{.Source}} to {{.Target}}" + ln -sf {{.Source}} {{.Target}} + {{end}} + echo "File linking duration: $(duration $ln_start) sec." + ###################### # Volumes chown # ###################### diff --git a/pkg/controller/elasticsearch/nodespec/podspec.go b/pkg/controller/elasticsearch/nodespec/podspec.go index 52fcda3cd0..b7a8008b67 100644 --- a/pkg/controller/elasticsearch/nodespec/podspec.go +++ b/pkg/controller/elasticsearch/nodespec/podspec.go @@ -24,6 +24,7 @@ import ( "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/initcontainer" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/label" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/network" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/securitycontext" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/settings" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/stackmon" esvolume "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/volume" @@ -99,6 +100,17 @@ func BuildPodTemplateSpec( } annotations := buildAnnotations(es, cfg, keystoreResources, esScripts.ResourceVersion) + // Attempt to detect if the default data directory is mounted in a volume. + // If not, it could be a bug, a misconfiguration, or a custom storage configuration that requires the user to + // explicitly set ReadOnlyRootFilesystem to true. + enableReadOnlyRootFilesystem := false + for _, volumeMount := range volumeMounts { + if volumeMount.Name == esvolume.ElasticsearchDataVolumeName { + enableReadOnlyRootFilesystem = true + break + } + } + // build the podTemplate until we have the effective resources configured builder = builder. WithLabels(labels). @@ -115,6 +127,8 @@ func BuildPodTemplateSpec( WithInitContainers(initContainers...). // inherit all env vars from main containers to allow Elasticsearch tools that read ES config to work in initContainers WithInitContainerDefaults(builder.MainContainer().Env...). + // set a default security context for both the Containers and the InitContainers + WithContainersSecurityContext(securitycontext.For(ver, enableReadOnlyRootFilesystem)). WithPreStopHook(*NewPreStopHook()) builder, err = stackmon.WithMonitoring(ctx, client, builder, es) diff --git a/pkg/controller/elasticsearch/nodespec/podspec_test.go b/pkg/controller/elasticsearch/nodespec/podspec_test.go index a77fafeaf1..960506377b 100644 --- a/pkg/controller/elasticsearch/nodespec/podspec_test.go +++ b/pkg/controller/elasticsearch/nodespec/podspec_test.go @@ -15,6 +15,7 @@ import ( "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ptr "k8s.io/utils/pointer" commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1" @@ -261,6 +262,14 @@ func TestBuildPodTemplateSpec(t *testing.T) { initContainers[i].Env = initContainerEnv initContainers[i].VolumeMounts = append(initContainers[i].VolumeMounts, volumeMounts...) 
initContainers[i].Resources = DefaultResources + initContainers[i].SecurityContext = &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + Privileged: ptr.Bool(false), + ReadOnlyRootFilesystem: ptr.Bool(false), + AllowPrivilegeEscalation: ptr.Bool(false), + } } // remove the prepare-fs init-container from comparison, it has its own volume mount logic @@ -304,10 +313,27 @@ func TestBuildPodTemplateSpec(t *testing.T) { Env: initContainerEnv, VolumeMounts: volumeMounts, Resources: DefaultResources, // inherited from main container + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + Privileged: ptr.Bool(false), + // ReadOnlyRootFilesystem is expected to be false in this test because there is no data volume. + ReadOnlyRootFilesystem: ptr.Bool(false), + AllowPrivilegeEscalation: ptr.Bool(false), + }, }), Containers: []corev1.Container{ { Name: "additional-container", + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + Privileged: ptr.Bool(false), + ReadOnlyRootFilesystem: ptr.Bool(false), + AllowPrivilegeEscalation: ptr.Bool(false), + }, }, { Name: "elasticsearch", @@ -325,6 +351,14 @@ func TestBuildPodTemplateSpec(t *testing.T) { Lifecycle: &corev1.Lifecycle{ PreStop: NewPreStopHook(), }, + SecurityContext: &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + Privileged: ptr.Bool(false), + ReadOnlyRootFilesystem: ptr.Bool(false), + AllowPrivilegeEscalation: ptr.Bool(false), + }, }, }, TerminationGracePeriodSeconds: &terminationGracePeriodSeconds, diff --git a/pkg/controller/elasticsearch/nodespec/volumes.go b/pkg/controller/elasticsearch/nodespec/volumes.go index d8a51e1fb9..e7a0fb0726 100644 --- a/pkg/controller/elasticsearch/nodespec/volumes.go +++ b/pkg/controller/elasticsearch/nodespec/volumes.go @@ -60,6 +60,10 @@ func buildVolumes( esvolume.FileSettingsVolumeName, esvolume.FileSettingsVolumeMountPath, ) + tmpVolume := volume.NewEmptyDirVolume( + esvolume.TempVolumeName, + esvolume.TempVolumeMountPath, + ) // append future volumes from PVCs (not resolved to a claim yet) persistentVolumes := make([]corev1.Volume, 0, len(nodeSpec.VolumeClaimTemplates)) for _, claimTemplate := range nodeSpec.VolumeClaimTemplates { @@ -89,6 +93,7 @@ func buildVolumes( scriptsVolume.Volume(), configVolume.Volume(), downwardAPIVolume.Volume(), + tmpVolume.Volume(), )...) if keystoreResources != nil { volumes = append(volumes, keystoreResources.Volume) @@ -106,6 +111,7 @@ func buildVolumes( scriptsVolume.VolumeMount(), configVolume.VolumeMount(), downwardAPIVolume.VolumeMount(), + tmpVolume.VolumeMount(), ) // version gate for the file-based settings volume and volumeMounts diff --git a/pkg/controller/elasticsearch/securitycontext/securitycontext.go b/pkg/controller/elasticsearch/securitycontext/securitycontext.go new file mode 100644 index 0000000000..37080ea518 --- /dev/null +++ b/pkg/controller/elasticsearch/securitycontext/securitycontext.go @@ -0,0 +1,47 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package securitycontext + +import ( + corev1 "k8s.io/api/core/v1" + ptr "k8s.io/utils/pointer" + + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/version" +) + +var ( + // MinStackVersion is the minimum Stack version to use RunAsNonRoot with the Elasticsearch image. + // Before 8.8.0 Elasticsearch image runs has non-numeric user. + // Refer to https://github.com/elastic/elasticsearch/pull/95390 for more information. + MinStackVersion = version.MustParse("8.8.0-SNAPSHOT") +) + +func For(ver version.Version, enableReadOnlyRootFilesystem bool) corev1.SecurityContext { + sc := corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + Privileged: ptr.Bool(false), + ReadOnlyRootFilesystem: ptr.Bool(enableReadOnlyRootFilesystem), + AllowPrivilegeEscalation: ptr.Bool(false), + } + if ver.LT(MinStackVersion) { + return sc + } + sc.RunAsNonRoot = ptr.Bool(true) + return sc +} + +func DefaultBeatSecurityContext() *corev1.SecurityContext { + return &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + Privileged: ptr.Bool(false), + RunAsNonRoot: ptr.Bool(true), + ReadOnlyRootFilesystem: ptr.Bool(true), + AllowPrivilegeEscalation: ptr.Bool(false), + } +} diff --git a/pkg/controller/elasticsearch/stackmon/sidecar.go b/pkg/controller/elasticsearch/stackmon/sidecar.go index 65abe350f1..903e4b4743 100644 --- a/pkg/controller/elasticsearch/stackmon/sidecar.go +++ b/pkg/controller/elasticsearch/stackmon/sidecar.go @@ -17,6 +17,7 @@ import ( "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/stackmon" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/stackmon/monitoring" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/network" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/securitycontext" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/user" esvolume "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/volume" "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" @@ -49,11 +50,17 @@ func Metricbeat(ctx context.Context, client k8s.Client, es esv1.Elasticsearch) ( if err != nil { return stackmon.BeatSidecar{}, err } + metricbeat.Container.SecurityContext = securitycontext.DefaultBeatSecurityContext() return metricbeat, nil } func Filebeat(ctx context.Context, client k8s.Client, es esv1.Elasticsearch) (stackmon.BeatSidecar, error) { - return stackmon.NewFileBeatSidecar(ctx, client, &es, es.Spec.Version, filebeatConfig, nil) + fileBeat, err := stackmon.NewFileBeatSidecar(ctx, client, &es, es.Spec.Version, filebeatConfig, nil) + if err != nil { + return stackmon.BeatSidecar{}, err + } + fileBeat.Container.SecurityContext = securitycontext.DefaultBeatSecurityContext() + return fileBeat, nil } // WithMonitoring updates the Elasticsearch Pod template builder to deploy Metricbeat and Filebeat in sidecar containers diff --git a/pkg/controller/elasticsearch/stackmon/sidecar_test.go b/pkg/controller/elasticsearch/stackmon/sidecar_test.go index 0dbce00e56..bf65d7612c 100644 --- a/pkg/controller/elasticsearch/stackmon/sidecar_test.go +++ b/pkg/controller/elasticsearch/stackmon/sidecar_test.go @@ -9,8 +9,10 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ptr "k8s.io/utils/pointer" commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1" 
@@ -91,8 +93,8 @@ func TestWithMonitoring(t *testing.T) { }, containersLength: 2, esEnvVarsLength: 0, - podVolumesLength: 3, - beatVolumeMountsLength: 3, + podVolumesLength: 4, + beatVolumeMountsLength: 4, }, { name: "with logs monitoring", @@ -104,8 +106,8 @@ func TestWithMonitoring(t *testing.T) { }, containersLength: 2, esEnvVarsLength: 1, - podVolumesLength: 2, - beatVolumeMountsLength: 3, + podVolumesLength: 3, + beatVolumeMountsLength: 4, }, { name: "with metrics and logs monitoring", @@ -118,8 +120,8 @@ func TestWithMonitoring(t *testing.T) { }, containersLength: 3, esEnvVarsLength: 1, - podVolumesLength: 4, - beatVolumeMountsLength: 3, + podVolumesLength: 6, + beatVolumeMountsLength: 4, }, { name: "with metrics and logs monitoring with different es ref", @@ -132,8 +134,8 @@ func TestWithMonitoring(t *testing.T) { }, containersLength: 3, esEnvVarsLength: 1, - podVolumesLength: 5, - beatVolumeMountsLength: 3, + podVolumesLength: 7, + beatVolumeMountsLength: 4, }, } @@ -152,6 +154,7 @@ func TestWithMonitoring(t *testing.T) { for _, c := range builder.PodTemplate.Spec.Containers { if c.Name == "metricbeat" { assert.Equal(t, tc.beatVolumeMountsLength, len(c.VolumeMounts)) + assertSecurityContext(t, c.SecurityContext) } } } @@ -159,9 +162,28 @@ func TestWithMonitoring(t *testing.T) { for _, c := range builder.PodTemplate.Spec.Containers { if c.Name == "filebeat" { assert.Equal(t, tc.beatVolumeMountsLength, len(c.VolumeMounts)) + assertSecurityContext(t, c.SecurityContext) } } } }) } } + +func assertSecurityContext(t *testing.T, securityContext *corev1.SecurityContext) { + t.Helper() + require.NotNil(t, securityContext) + require.Equal(t, ptr.Bool(true), securityContext.RunAsNonRoot, "RunAsNonRoot was expected to be true") + require.NotNil(t, securityContext.Privileged) + require.False(t, *securityContext.Privileged) + require.NotNil(t, securityContext.Capabilities) + droppedCapabilities := securityContext.Capabilities.Drop + hasDropAllCapability := false + for _, capability := range droppedCapabilities { + if capability == "ALL" { + hasDropAllCapability = true + break + } + } + require.True(t, hasDropAllCapability, "ALL capability not found in securityContext.Capabilities.Drop") +} diff --git a/pkg/controller/elasticsearch/volume/names.go b/pkg/controller/elasticsearch/volume/names.go index 3205751f81..0e7e430f6d 100644 --- a/pkg/controller/elasticsearch/volume/names.go +++ b/pkg/controller/elasticsearch/volume/names.go @@ -48,4 +48,7 @@ const ( FileSettingsVolumeName = "file-settings" FileSettingsVolumeMountPath = "/usr/share/elasticsearch/config/operator" + + TempVolumeName = "tmp-volume" + TempVolumeMountPath = "/tmp" ) diff --git a/pkg/controller/kibana/stackmon/sidecar_test.go b/pkg/controller/kibana/stackmon/sidecar_test.go index 384c86a637..671f5608ed 100644 --- a/pkg/controller/kibana/stackmon/sidecar_test.go +++ b/pkg/controller/kibana/stackmon/sidecar_test.go @@ -102,8 +102,8 @@ func TestWithMonitoring(t *testing.T) { return sampleKb }, containersLength: 2, - podVolumesLength: 3, - beatVolumeMountsLength: 3, + podVolumesLength: 4, + beatVolumeMountsLength: 4, }, { name: "with logs monitoring", @@ -114,8 +114,8 @@ func TestWithMonitoring(t *testing.T) { return sampleKb }, containersLength: 2, - podVolumesLength: 3, - beatVolumeMountsLength: 3, + podVolumesLength: 4, + beatVolumeMountsLength: 4, }, { name: "with metrics and logs monitoring", @@ -127,8 +127,8 @@ func TestWithMonitoring(t *testing.T) { return sampleKb }, containersLength: 3, - podVolumesLength: 5, - 
beatVolumeMountsLength: 3, + podVolumesLength: 7, + beatVolumeMountsLength: 4, }, { name: "with metrics and logs monitoring with different es ref", @@ -140,8 +140,8 @@ func TestWithMonitoring(t *testing.T) { return sampleKb }, containersLength: 3, - podVolumesLength: 6, - beatVolumeMountsLength: 3, + podVolumesLength: 8, + beatVolumeMountsLength: 4, }, } diff --git a/test/e2e/test/elasticsearch/check_securitycontext.go b/test/e2e/test/elasticsearch/check_securitycontext.go new file mode 100644 index 0000000000..b3bc75575b --- /dev/null +++ b/test/e2e/test/elasticsearch/check_securitycontext.go @@ -0,0 +1,68 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package elasticsearch + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + ptr "k8s.io/utils/pointer" + + esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/version" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/securitycontext" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/test" +) + +func CheckContainerSecurityContext(es esv1.Elasticsearch, k *test.K8sClient) test.Step { + //nolint:thelper + return test.Step{ + Name: "Elasticsearch containers SecurityContext should be set", + Test: func(t *testing.T) { + usesEmptyDir := usesEmptyDir(es) + if usesEmptyDir { + return + } + pods, err := k.GetPods(test.ESPodListOptions(es.Namespace, es.Name)...) + require.NoError(t, err) + + ver := version.MustParse(es.Spec.Version) + for _, p := range pods { + for _, c := range p.Spec.Containers { + assertSecurityContext(t, ver, c.SecurityContext, c.Image) + } + for _, c := range p.Spec.InitContainers { + assertSecurityContext(t, ver, c.SecurityContext, c.Image) + } + } + }, + } +} + +func assertSecurityContext(t *testing.T, ver version.Version, securityContext *corev1.SecurityContext, image string) { + t.Helper() + require.NotNil(t, securityContext) + if strings.HasPrefix(image, "docker.elastic.co/elasticsearch/elasticsearch") && ver.LT(securitycontext.MinStackVersion) { + require.Nil(t, securityContext.RunAsNonRoot, "RunAsNonRoot was expected to be nil") + } else { + require.Equal(t, ptr.Bool(true), securityContext.RunAsNonRoot, "RunAsNonRoot was expected to be true") + } + require.NotNil(t, securityContext.Privileged) + require.False(t, *securityContext.Privileged) + + // OpenShift may add others Capabilities. We only check that ALL is included in "Drop". 
+ require.NotNil(t, securityContext.Capabilities) + droppedCapabilities := securityContext.Capabilities.Drop + hasDropAllCapability := false + for _, capability := range droppedCapabilities { + if capability == "ALL" { + hasDropAllCapability = true + break + } + } + require.True(t, hasDropAllCapability, "ALL capability not found in securityContext.Capabilities.Drop") +} diff --git a/test/e2e/test/elasticsearch/checks_k8s.go b/test/e2e/test/elasticsearch/checks_k8s.go index 9f3122a5a4..845543f335 100644 --- a/test/e2e/test/elasticsearch/checks_k8s.go +++ b/test/e2e/test/elasticsearch/checks_k8s.go @@ -51,6 +51,7 @@ func (b Builder) CheckK8sTestSteps(k *test.K8sClient) test.StepList { CheckClusterHealth(b, k), CheckESPassword(b, k), CheckESDataVolumeType(b.Elasticsearch, k), + CheckContainerSecurityContext(b.Elasticsearch, k), CheckClusterUUIDAnnotation(b.Elasticsearch, k), } } From 326eb0022c57991be8fd1d06ef904b92bd0b0c17 Mon Sep 17 00:00:00 2001 From: Michael Morello Date: Thu, 27 Apr 2023 15:44:47 +0200 Subject: [PATCH 02/26] [Fleet] Deprecate is_default and is_default_fleet_server flags (#6724) * Introduce HasWarnings interface * Update samples in config/recipes * Update example in docs * Update CRD/API documentation * Update Helm Charts * Add checkPolicyID for next major release --- config/crds/v1/all-crds.yaml | 6 +- .../v1/bases/agent.k8s.elastic.co_agents.yaml | 6 +- .../elastic-agent/fleet-apm-integration.yaml | 4 +- .../fleet-custom-logs-integration.yaml | 6 +- .../fleet-kubernetes-integration.yaml | 4 +- deploy/eck-agent/examples/fleet-agents.yaml | 2 + deploy/eck-agent/values.yaml | 3 + deploy/eck-fleet-server/values.yaml | 3 + .../eck-operator-crds/templates/all-crds.yaml | 6 +- .../examples/agent/fleet-agents.yaml | 6 +- .../agent-fleet.asciidoc | 5 +- docs/reference/api-docs.asciidoc | 2 +- pkg/apis/agent/v1alpha1/agent_types.go | 8 ++- pkg/apis/agent/v1alpha1/validations.go | 15 +++++ pkg/apis/agent/v1alpha1/validations_test.go | 60 ++++++++++++++++++ pkg/apis/agent/v1alpha1/webhook.go | 13 ++++ pkg/controller/agent/fleet.go | 17 +++-- pkg/controller/agent/fleet_test.go | 63 ++++++++++++------- pkg/controller/common/webhook/warnings.go | 21 +++++++ pkg/controller/common/webhook/webhook.go | 12 ++-- pkg/controller/common/webhook/webhook_test.go | 35 +++++++++++ pkg/utils/test/events.go | 32 ++++++++++ 22 files changed, 274 insertions(+), 55 deletions(-) create mode 100644 pkg/controller/common/webhook/warnings.go create mode 100644 pkg/utils/test/events.go diff --git a/config/crds/v1/all-crds.yaml b/config/crds/v1/all-crds.yaml index 858b949159..6ce102bc14 100644 --- a/config/crds/v1/all-crds.yaml +++ b/config/crds/v1/all-crds.yaml @@ -757,9 +757,9 @@ spec: - fleet type: string policyID: - description: PolicyID optionally determines into which Agent Policy - this Agent will be enrolled. If left empty the default policy will - be used. + description: PolicyID determines into which Agent Policy this Agent + will be enrolled. This field will become mandatory in a future release, + default policies are deprecated since 8.1.0. 
type: string revisionHistoryLimit: description: RevisionHistoryLimit is the number of revisions to retain diff --git a/config/crds/v1/bases/agent.k8s.elastic.co_agents.yaml b/config/crds/v1/bases/agent.k8s.elastic.co_agents.yaml index 60d1b5dcd0..da3d1416dd 100644 --- a/config/crds/v1/bases/agent.k8s.elastic.co_agents.yaml +++ b/config/crds/v1/bases/agent.k8s.elastic.co_agents.yaml @@ -16310,9 +16310,9 @@ spec: - fleet type: string policyID: - description: PolicyID optionally determines into which Agent Policy - this Agent will be enrolled. If left empty the default policy will - be used. + description: PolicyID determines into which Agent Policy this Agent + will be enrolled. This field will become mandatory in a future release, + default policies are deprecated since 8.1.0. type: string revisionHistoryLimit: description: RevisionHistoryLimit is the number of revisions to retain diff --git a/config/recipes/elastic-agent/fleet-apm-integration.yaml b/config/recipes/elastic-agent/fleet-apm-integration.yaml index 82ebba99ca..e0421def77 100644 --- a/config/recipes/elastic-agent/fleet-apm-integration.yaml +++ b/config/recipes/elastic-agent/fleet-apm-integration.yaml @@ -22,7 +22,6 @@ spec: xpack.fleet.agentPolicies: - name: Fleet Server on ECK policy id: eck-fleet-server - is_default_fleet_server: true namespace: default monitoring_enabled: - logs @@ -40,7 +39,6 @@ spec: - logs - metrics unenroll_timeout: 900 - is_default: true package_policies: - name: system-1 id: system-1 @@ -80,6 +78,7 @@ spec: - name: elasticsearch mode: fleet fleetServerEnabled: true + policyID: eck-fleet-server deployment: replicas: 1 podTemplate: @@ -100,6 +99,7 @@ spec: fleetServerRef: name: fleet-server mode: fleet + policyID: eck-agent deployment: replicas: 1 podTemplate: diff --git a/config/recipes/elastic-agent/fleet-custom-logs-integration.yaml b/config/recipes/elastic-agent/fleet-custom-logs-integration.yaml index b538a16fe9..15e0455064 100644 --- a/config/recipes/elastic-agent/fleet-custom-logs-integration.yaml +++ b/config/recipes/elastic-agent/fleet-custom-logs-integration.yaml @@ -27,7 +27,6 @@ spec: - logs - metrics unenroll_timeout: 900 - is_default_fleet_server: true package_policies: - name: fleet_server-1 id: fleet_server-1 @@ -39,8 +38,7 @@ spec: monitoring_enabled: - logs - metrics - unenroll_timeout: 900 - is_default: true + unenroll_timeout: 900 package_policies: - name: system-1 id: system-1 @@ -89,6 +87,7 @@ spec: - name: elasticsearch mode: fleet fleetServerEnabled: true + policyID: eck-fleet-server deployment: replicas: 1 podTemplate: @@ -109,6 +108,7 @@ spec: fleetServerRef: name: fleet-server mode: fleet + policyID: eck-agent daemonSet: podTemplate: spec: diff --git a/config/recipes/elastic-agent/fleet-kubernetes-integration.yaml b/config/recipes/elastic-agent/fleet-kubernetes-integration.yaml index 6d308e775e..685eb01b1d 100644 --- a/config/recipes/elastic-agent/fleet-kubernetes-integration.yaml +++ b/config/recipes/elastic-agent/fleet-kubernetes-integration.yaml @@ -27,7 +27,6 @@ spec: - logs - metrics unenroll_timeout: 900 - is_default_fleet_server: true package_policies: - name: fleet_server-1 id: fleet_server-1 @@ -40,7 +39,6 @@ spec: - logs - metrics unenroll_timeout: 900 - is_default: true package_policies: - package: name: system @@ -73,6 +71,7 @@ spec: - name: elasticsearch mode: fleet fleetServerEnabled: true + policyID: eck-fleet-server deployment: replicas: 1 podTemplate: @@ -93,6 +92,7 @@ spec: fleetServerRef: name: fleet-server mode: fleet + policyID: eck-agent daemonSet: 
podTemplate: spec: diff --git a/deploy/eck-agent/examples/fleet-agents.yaml b/deploy/eck-agent/examples/fleet-agents.yaml index 1754ab4581..f9e836b5b3 100644 --- a/deploy/eck-agent/examples/fleet-agents.yaml +++ b/deploy/eck-agent/examples/fleet-agents.yaml @@ -4,6 +4,8 @@ version: 8.8.0-SNAPSHOT spec: + # This must match the name of an Agent policy. + policyID: eck-agent # This must match the name of the fleet server installed from eck-fleet-server chart. fleetServerRef: name: eck-fleet-server diff --git a/deploy/eck-agent/values.yaml b/deploy/eck-agent/values.yaml index 7d2af97595..6c9b77b7be 100644 --- a/deploy/eck-agent/values.yaml +++ b/deploy/eck-agent/values.yaml @@ -29,6 +29,9 @@ labels: {} annotations: {} spec: + # policyID determines into which Agent Policy this Agent will be enrolled. + # policyID: eck-agent + # Referenced resources are below and depending on the setup, at least one is required for a functional Agent. # ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-elastic-agent-fleet-configuration.html#k8s-elastic-agent-fleet-configuration-setting-referenced-resources # diff --git a/deploy/eck-fleet-server/values.yaml b/deploy/eck-fleet-server/values.yaml index dee8237f69..608e0b3306 100644 --- a/deploy/eck-fleet-server/values.yaml +++ b/deploy/eck-fleet-server/values.yaml @@ -29,6 +29,9 @@ labels: {} annotations: {} spec: + # policyID determines into which Agent Policy this Fleet Server will be enrolled. + policyID: eck-fleet-server + # Referenced resources are below and both elasticsearchRefs and kibanaRef are required for a functional Fleet Server. # ref: https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-beat-configuration.html#k8s-beat-connect-es # diff --git a/deploy/eck-operator/charts/eck-operator-crds/templates/all-crds.yaml b/deploy/eck-operator/charts/eck-operator-crds/templates/all-crds.yaml index 35a5cd9471..499ba09063 100644 --- a/deploy/eck-operator/charts/eck-operator-crds/templates/all-crds.yaml +++ b/deploy/eck-operator/charts/eck-operator-crds/templates/all-crds.yaml @@ -763,9 +763,9 @@ spec: - fleet type: string policyID: - description: PolicyID optionally determines into which Agent Policy - this Agent will be enrolled. If left empty the default policy will - be used. + description: PolicyID determines into which Agent Policy this Agent + will be enrolled. This field will become mandatory in a future release, + default policies are deprecated since 8.1.0. type: string revisionHistoryLimit: description: RevisionHistoryLimit is the number of revisions to retain diff --git a/deploy/eck-stack/examples/agent/fleet-agents.yaml b/deploy/eck-stack/examples/agent/fleet-agents.yaml index 722cb4c8b4..abcf84ff3c 100644 --- a/deploy/eck-stack/examples/agent/fleet-agents.yaml +++ b/deploy/eck-stack/examples/agent/fleet-agents.yaml @@ -55,7 +55,6 @@ eck-kibana: monitoring_enabled: - logs - metrics - is_default_fleet_server: true package_policies: - name: fleet_server-1 id: fleet_server-1 @@ -68,7 +67,6 @@ eck-kibana: - logs - metrics unenroll_timeout: 900 - is_default: true package_policies: - package: name: system @@ -81,6 +79,8 @@ eck-agent: enabled: true spec: + # Agent policy to be used. + policyID: eck-agent # Reference to ECK-managed Kibana instance. # kibanaRef: @@ -111,6 +111,8 @@ eck-fleet-server: fullnameOverride: "fleet-server" spec: + # Agent policy to be used. 
+ policyID: eck-fleet-server kibanaRef: name: kibana elasticsearchRefs: diff --git a/docs/orchestrating-elastic-stack-applications/agent-fleet.asciidoc b/docs/orchestrating-elastic-stack-applications/agent-fleet.asciidoc index a9fa187ff2..44658ddfdf 100644 --- a/docs/orchestrating-elastic-stack-applications/agent-fleet.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/agent-fleet.asciidoc @@ -36,6 +36,7 @@ spec: - name: elasticsearch-quickstart mode: fleet fleetServerEnabled: true + policyID: eck-fleet-server deployment: replicas: 1 podTemplate: @@ -57,6 +58,7 @@ spec: fleetServerRef: name: fleet-server-quickstart mode: fleet + policyID: eck-agent daemonSet: podTemplate: spec: @@ -88,7 +90,6 @@ spec: xpack.fleet.agentPolicies: - name: Fleet Server on ECK policy id: eck-fleet-server - is_default_fleet_server: true namespace: default monitoring_enabled: - logs @@ -106,7 +107,6 @@ spec: - logs - metrics unenroll_timeout: 900 - is_default: true package_policies: - name: system-1 id: system-1 @@ -273,7 +273,6 @@ spec: xpack.fleet.agentPolicies: - name: Fleet Server on ECK policy id: eck-fleet-server - is_default_fleet_server: true namespace: default monitoring_enabled: - logs diff --git a/docs/reference/api-docs.asciidoc b/docs/reference/api-docs.asciidoc index 04cdc02ee7..c2d94ffc7f 100644 --- a/docs/reference/api-docs.asciidoc +++ b/docs/reference/api-docs.asciidoc @@ -96,7 +96,7 @@ AgentSpec defines the desired state of the Agent | *`http`* __xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-common-v1-httpconfig[$$HTTPConfig$$]__ | HTTP holds the HTTP layer configuration for the Agent in Fleet mode with Fleet Server enabled. | *`mode`* __xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-agent-v1alpha1-agentmode[$$AgentMode$$]__ | Mode specifies the source of configuration for the Agent. The configuration can be specified locally through `config` or `configRef` (`standalone` mode), or come from Fleet during runtime (`fleet` mode). Defaults to `standalone` mode. | *`fleetServerEnabled`* __boolean__ | FleetServerEnabled determines whether this Agent will launch Fleet Server. Don't set unless `mode` is set to `fleet`. -| *`policyID`* __string__ | PolicyID optionally determines into which Agent Policy this Agent will be enrolled. If left empty the default policy will be used. +| *`policyID`* __string__ | PolicyID determines into which Agent Policy this Agent will be enrolled. This field will become mandatory in a future release, default policies are deprecated since 8.1.0. | *`kibanaRef`* __xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-common-v1-objectselector[$$ObjectSelector$$]__ | KibanaRef is a reference to Kibana where Fleet should be set up and this Agent should be enrolled. Don't set unless `mode` is set to `fleet`. | *`fleetServerRef`* __xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-common-v1-objectselector[$$ObjectSelector$$]__ | FleetServerRef is a reference to Fleet Server that this Agent should connect to to obtain it's configuration. Don't set unless `mode` is set to `fleet`. 
|=== diff --git a/pkg/apis/agent/v1alpha1/agent_types.go b/pkg/apis/agent/v1alpha1/agent_types.go index a9bfe530c9..08f0d54d83 100644 --- a/pkg/apis/agent/v1alpha1/agent_types.go +++ b/pkg/apis/agent/v1alpha1/agent_types.go @@ -24,6 +24,10 @@ const ( FleetServerServiceAccount commonv1.ServiceAccountName = "fleet-server" ) +var ( + MandatoryPolicyIDVersion = version.MustParse("9.0.0-SNAPSHOT") +) + // AgentSpec defines the desired state of the Agent type AgentSpec struct { // Version of the Agent. @@ -87,8 +91,8 @@ type AgentSpec struct { // +kubebuilder:validation:Optional FleetServerEnabled bool `json:"fleetServerEnabled,omitempty"` - // PolicyID optionally determines into which Agent Policy this Agent will be enrolled. If left empty the default - // policy will be used. + // PolicyID determines into which Agent Policy this Agent will be enrolled. + // This field will become mandatory in a future release, default policies are deprecated since 8.1.0. // +kubebuilder:validation:Optional PolicyID string `json:"policyID,omitempty"` diff --git a/pkg/apis/agent/v1alpha1/validations.go b/pkg/apis/agent/v1alpha1/validations.go index 22a6af7802..423e8fcadd 100644 --- a/pkg/apis/agent/v1alpha1/validations.go +++ b/pkg/apis/agent/v1alpha1/validations.go @@ -16,6 +16,7 @@ import ( var ( defaultChecks = []func(*Agent) field.ErrorList{ + checkPolicyID, checkNoUnknownFields, checkNameLength, checkSupportedVersion, @@ -54,6 +55,20 @@ func checkSupportedVersion(a *Agent) field.ErrorList { return commonv1.CheckSupportedStackVersion(a.Spec.Version, version.SupportedAgentVersions) } +func checkPolicyID(a *Agent) field.ErrorList { + v, err := commonv1.ParseVersion(a.Spec.Version) + if err != nil { + return err + } + if v.GTE(MandatoryPolicyIDVersion) && len(a.Spec.PolicyID) == 0 { + msg := "Agent policyID is mandatory" + return field.ErrorList{ + field.Required(field.NewPath("spec").Child("policyID"), msg), + } + } + return nil +} + func checkAtMostOneDeploymentOption(a *Agent) field.ErrorList { if a.Spec.DaemonSet != nil && a.Spec.Deployment != nil { msg := "Specify either daemonSet or deployment, not both" diff --git a/pkg/apis/agent/v1alpha1/validations_test.go b/pkg/apis/agent/v1alpha1/validations_test.go index 50b36b2e95..2a8c41dc74 100644 --- a/pkg/apis/agent/v1alpha1/validations_test.go +++ b/pkg/apis/agent/v1alpha1/validations_test.go @@ -65,6 +65,66 @@ func Test_checkSupportedVersion(t *testing.T) { } } +func Test_checkPolicyID(t *testing.T) { + expectedError := field.ErrorList{ + &field.Error{ + Type: field.ErrorTypeRequired, + Field: "spec.policyID", + BadValue: "", + Detail: "Agent policyID is mandatory", + }} + tests := []struct { + name string + beat Agent + wantErr field.ErrorList + }{ + { + name: "no policyID required for 8.x", + beat: Agent{ + Spec: AgentSpec{ + Version: "8.5.99", + }, + }, + wantErr: nil, + }, + { + name: "policyID required for 9.x", + beat: Agent{ + Spec: AgentSpec{ + Version: "9.0.0", + }, + }, + wantErr: expectedError, + }, + { + name: "policyID required for 9.0.0-SNAPSHOT", + beat: Agent{ + Spec: AgentSpec{ + Version: "9.0.0-SNAPSHOT", + }, + }, + wantErr: expectedError, + }, + { + name: "policyID set for 9.x", + beat: Agent{ + Spec: AgentSpec{ + Version: "9.0.0", + PolicyID: "foo", + }, + }, + wantErr: nil, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := checkPolicyID(&tc.beat) + assert.Equal(t, tc.wantErr, got) + }) + } +} + func Test_checkSpec(t *testing.T) { tests := []struct { name string diff --git 
a/pkg/apis/agent/v1alpha1/webhook.go b/pkg/apis/agent/v1alpha1/webhook.go index 7231069081..b2bda2c1ee 100644 --- a/pkg/apis/agent/v1alpha1/webhook.go +++ b/pkg/apis/agent/v1alpha1/webhook.go @@ -6,6 +6,7 @@ package v1alpha1 import ( "errors" + "fmt" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -19,6 +20,8 @@ import ( const ( // webhookPath is the HTTP path for the Elastic Agent validating webhook. webhookPath = "/validate-agent-k8s-elastic-co-v1alpha1-agent" + + MissingPolicyIDMessage = "spec.PolicyID is empty, spec.PolicyID will become mandatory in a future release" ) var ( @@ -30,6 +33,16 @@ var ( var _ webhook.Validator = &Agent{} +func (a *Agent) GetWarnings() []string { + if a == nil { + return nil + } + if len(a.Spec.PolicyID) == 0 { + return []string{fmt.Sprintf("%s %s/%s: %s", Kind, a.Namespace, a.Name, MissingPolicyIDMessage)} + } + return nil +} + // ValidateCreate is called by the validating webhook to validate the create operation. // Satisfies the webhook.Validator interface. func (a *Agent) ValidateCreate() error { diff --git a/pkg/controller/agent/fleet.go b/pkg/controller/agent/fleet.go index 9282e6fd84..0f552869ca 100644 --- a/pkg/controller/agent/fleet.go +++ b/pkg/controller/agent/fleet.go @@ -17,12 +17,15 @@ import ( "github.com/go-logr/logr" "github.com/pkg/errors" "go.elastic.co/apm/module/apmhttp/v2" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/reconcile" agentv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/agent/v1alpha1" commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" v1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/kibana/v1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/events" commonhttp "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/http" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/reconciler" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/tracing" @@ -252,7 +255,7 @@ func maybeReconcileFleetEnrollment(params Params, result *reconciler.Results) En } token, err := reconcileEnrollmentToken( - params.Context, params.Agent, params.Client, + params, newFleetAPI( params.OperatorParams.Dialer, kbConnectionSettings, @@ -274,8 +277,10 @@ func isKibanaReachable(ctx context.Context, client k8s.Client, kibanaNSN types.N return true, nil } -func reconcileEnrollmentToken(ctx context.Context, agent agentv1alpha1.Agent, client k8s.Client, api fleetAPI) (EnrollmentAPIKey, error) { +func reconcileEnrollmentToken(params Params, api fleetAPI) (EnrollmentAPIKey, error) { defer api.client.CloseIdleConnections() + agent := params.Agent + ctx := params.Context // do we have an existing token that we have rolled out previously? tokenName, exists := agent.Annotations[FleetTokenAnnotation] if !exists { @@ -285,7 +290,7 @@ func reconcileEnrollmentToken(ctx context.Context, agent agentv1alpha1.Agent, cl } } // what policy should we enroll this agent in? 
- policyID, err := reconcilePolicyID(ctx, agent, api) + policyID, err := findPolicyID(ctx, params.EventRecorder, agent, api) if err != nil { return EnrollmentAPIKey{}, err } @@ -323,17 +328,19 @@ FindOrCreate: agent.Annotations = map[string]string{} } agent.Annotations[FleetTokenAnnotation] = key.ID - err = client.Update(ctx, &agent) + err = params.Client.Update(ctx, &agent) if err != nil { return EnrollmentAPIKey{}, err } return key, nil } -func reconcilePolicyID(ctx context.Context, agent agentv1alpha1.Agent, api fleetAPI) (string, error) { +func findPolicyID(ctx context.Context, recorder record.EventRecorder, agent agentv1alpha1.Agent, api fleetAPI) (string, error) { if agent.Spec.PolicyID != "" { return agent.Spec.PolicyID, nil } + recorder.Event(&agent, corev1.EventTypeWarning, events.EventReasonValidation, agentv1alpha1.MissingPolicyIDMessage) + ulog.FromContext(ctx).Info(agentv1alpha1.MissingPolicyIDMessage) if agent.Spec.FleetServerEnabled { return api.defaultFleetServerPolicyID(ctx) } diff --git a/pkg/controller/agent/fleet_test.go b/pkg/controller/agent/fleet_test.go index d7427a4c17..b24f3cc824 100644 --- a/pkg/controller/agent/fleet_test.go +++ b/pkg/controller/agent/fleet_test.go @@ -14,12 +14,15 @@ import ( "testing" "github.com/pkg/errors" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" "github.com/elastic/cloud-on-k8s/v2/pkg/apis/agent/v1alpha1" "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" ulog "github.com/elastic/cloud-on-k8s/v2/pkg/utils/log" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/test" ) var ( @@ -44,10 +47,11 @@ func Test_reconcileEnrollmentToken(t *testing.T) { api *mockFleetAPI } tests := []struct { - name string - args args - want EnrollmentAPIKey - wantErr bool + name string + args args + want EnrollmentAPIKey + wantErr bool + wantEvents []string }{ { name: "Agent annotated and fixed policy", @@ -64,8 +68,9 @@ func Test_reconcileEnrollmentToken(t *testing.T) { {"GET", "/api/fleet/enrollment_api_keys/some-token-id"}: {code: 200, body: enrollmentKeySample}, }), }, - want: asObject(enrollmentKeySample), - wantErr: false, + want: asObject(enrollmentKeySample), + wantEvents: nil, // PolicyID is provided. + wantErr: false, }, { name: "Agent annotated but default policy", @@ -89,8 +94,9 @@ func Test_reconcileEnrollmentToken(t *testing.T) { {"POST", "/api/fleet/enrollment_api_keys"}: {code: 200, body: enrollmentKeySample}, }), }, - want: asObject(enrollmentKeySample), - wantErr: false, + want: asObject(enrollmentKeySample), + wantEvents: []string{"Warning Validation spec.PolicyID is empty, spec.PolicyID will become mandatory in a future release"}, + wantErr: false, }, { name: "Agent annotated but token does not exist", @@ -111,8 +117,9 @@ func Test_reconcileEnrollmentToken(t *testing.T) { {"GET", "/api/fleet/enrollment_api_keys"}: {code: 200, body: enrollmentKeyListSample}, }), }, - want: asObject(enrollmentKeySample), - wantErr: false, + want: asObject(enrollmentKeySample), + wantEvents: nil, // PolicyID is provided. 
+ wantErr: false, }, { name: "Agent annotated but token is invalid", @@ -133,8 +140,9 @@ func Test_reconcileEnrollmentToken(t *testing.T) { {"POST", "/api/fleet/enrollment_api_keys"}: {code: 200, body: enrollmentKeySample}, }), }, - want: asObject(enrollmentKeySample), - wantErr: false, + want: asObject(enrollmentKeySample), + wantEvents: []string{"Warning Validation spec.PolicyID is empty, spec.PolicyID will become mandatory in a future release"}, + wantErr: false, }, { name: "Agent not annotated yet", @@ -152,8 +160,9 @@ func Test_reconcileEnrollmentToken(t *testing.T) { {"POST", "/api/fleet/enrollment_api_keys"}: {code: 200, body: enrollmentKeySample}, }), }, - want: asObject(enrollmentKeySample), - wantErr: false, + want: asObject(enrollmentKeySample), + wantEvents: []string{"Warning Validation spec.PolicyID is empty, spec.PolicyID will become mandatory in a future release"}, + wantErr: false, }, { name: "Error in Fleet API", @@ -170,8 +179,9 @@ func Test_reconcileEnrollmentToken(t *testing.T) { {"GET", "/api/fleet/enrollment_api_keys"}: {code: 500}, // could also be a timeout etc }), }, - want: EnrollmentAPIKey{}, - wantErr: true, + want: EnrollmentAPIKey{}, + wantEvents: []string{"Warning Validation spec.PolicyID is empty, spec.PolicyID will become mandatory in a future release"}, + wantErr: true, }, { name: "Fleet Server policy and key", @@ -193,8 +203,9 @@ func Test_reconcileEnrollmentToken(t *testing.T) { {"GET", "/api/fleet/enrollment_api_keys/some-token-id"}: {code: 200, body: fleetServerKeySample}, }), }, - want: asObject(fleetServerKeySample), - wantErr: false, + want: asObject(fleetServerKeySample), + wantEvents: []string{"Warning Validation spec.PolicyID is empty, spec.PolicyID will become mandatory in a future release"}, + wantErr: false, }, { name: "Error in Kubernetes API", @@ -220,8 +231,9 @@ func Test_reconcileEnrollmentToken(t *testing.T) { {"POST", "/api/fleet/enrollment_api_keys"}: {code: 200, body: fleetServerKeySample}, }), }, - want: EnrollmentAPIKey{}, - wantErr: true, + want: EnrollmentAPIKey{}, + wantEvents: []string{"Warning Validation spec.PolicyID is empty, spec.PolicyID will become mandatory in a future release"}, + wantErr: true, }, } for _, tt := range tests { @@ -230,7 +242,14 @@ func Test_reconcileEnrollmentToken(t *testing.T) { if tt.args.client != nil { client = *tt.args.client } - got, err := reconcileEnrollmentToken(context.Background(), tt.args.agent, client, tt.args.api.fleetAPI) + fakeRecorder := record.NewFakeRecorder(10) + params := Params{ + Context: context.Background(), + Client: client, + EventRecorder: fakeRecorder, + Agent: tt.args.agent, + } + got, err := reconcileEnrollmentToken(params, tt.args.api.fleetAPI) require.Empty(t, tt.args.api.missingRequests()) if (err != nil) != tt.wantErr { t.Errorf("reconcileEnrollmentToken() error = %v, wantErr %v", err, tt.wantErr) @@ -239,6 +258,8 @@ func Test_reconcileEnrollmentToken(t *testing.T) { if got != tt.want { t.Errorf("reconcileEnrollmentToken() got = %v, want %v", got, tt.want) } + gotEvents := test.ReadAtMostEvents(t, len(tt.wantEvents), fakeRecorder) + assert.Equal(t, tt.wantEvents, gotEvents) }) } } diff --git a/pkg/controller/common/webhook/warnings.go b/pkg/controller/common/webhook/warnings.go new file mode 100644 index 0000000000..f39191abbd --- /dev/null +++ b/pkg/controller/common/webhook/warnings.go @@ -0,0 +1,21 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package webhook + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +type HasWarnings interface { + GetWarnings() []string +} + +func MaybeGetWarnings(object runtime.Object) []string { + v, ok := object.(HasWarnings) + if ok { + return v.GetWarnings() + } + return nil +} diff --git a/pkg/controller/common/webhook/webhook.go b/pkg/controller/common/webhook/webhook.go index ed749ac83e..7e9e7bf37b 100644 --- a/pkg/controller/common/webhook/webhook.go +++ b/pkg/controller/common/webhook/webhook.go @@ -88,14 +88,16 @@ func (v *validatingWebhook) Handle(ctx context.Context, req admission.Request) a return admission.Allowed("") } + warnings := MaybeGetWarnings(obj) + if err := v.commonValidations(ctx, req, obj); err != nil { - return admission.Denied(err.Error()) + return admission.Denied(err.Error()).WithWarnings(warnings...) } if req.Operation == admissionv1.Create { err = obj.ValidateCreate() if err != nil { - return admission.Denied(err.Error()) + return admission.Denied(err.Error()).WithWarnings(warnings...) } } @@ -104,11 +106,11 @@ func (v *validatingWebhook) Handle(ctx context.Context, req admission.Request) a err = v.decoder.DecodeRaw(req.OldObject, oldObj) if err != nil { whlog.Error(err, "decoding old object from webhook request into type (%T)", oldObj) - return admission.Errored(http.StatusBadRequest, err) + return admission.Errored(http.StatusBadRequest, err).WithWarnings(warnings...) } err = obj.ValidateUpdate(oldObj) if err != nil { - return admission.Denied(err.Error()) + return admission.Denied(err.Error()).WithWarnings(warnings...) } } @@ -119,5 +121,5 @@ func (v *validatingWebhook) Handle(ctx context.Context, req admission.Request) a } } - return admission.Allowed("") + return admission.Allowed("").WithWarnings(warnings...) 
} diff --git a/pkg/controller/common/webhook/webhook_test.go b/pkg/controller/common/webhook/webhook_test.go index 7f305d9582..4c0c45fb05 100644 --- a/pkg/controller/common/webhook/webhook_test.go +++ b/pkg/controller/common/webhook/webhook_test.go @@ -60,6 +60,7 @@ func Test_validatingWebhook_Handle(t *testing.T) { Spec: agentv1alpha1.AgentSpec{ Version: "7.10.0", Deployment: &agentv1alpha1.DeploymentSpec{}, + PolicyID: "a-policy", }, }), }, @@ -67,6 +68,34 @@ func Test_validatingWebhook_Handle(t *testing.T) { }, want: admission.Allowed(""), }, + { + name: "no policy id is allowed but it should return a warning.", + fields: fields{ + set.Make("elastic"), + &agentv1alpha1.Agent{}, + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + Object: runtime.RawExtension{ + Raw: asJSON(&agentv1alpha1.Agent{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testAgent", + Namespace: "elastic", + Labels: map[string]string{ + "test": "label1", + }, + }, + Spec: agentv1alpha1.AgentSpec{ + Version: "7.10.0", + Deployment: &agentv1alpha1.DeploymentSpec{}, + }, + }), + }, + }, + }, + want: admission.Allowed("").WithWarnings("Agent elastic/testAgent: spec.PolicyID is empty, spec.PolicyID will become mandatory in a future release"), + }, { name: "create agent is denied because of invalid version, and returns denied.", fields: fields{ @@ -88,6 +117,7 @@ func Test_validatingWebhook_Handle(t *testing.T) { Spec: agentv1alpha1.AgentSpec{ Version: "0.10.0", Deployment: &agentv1alpha1.DeploymentSpec{}, + PolicyID: "a-policy", }, }), }, @@ -116,6 +146,7 @@ func Test_validatingWebhook_Handle(t *testing.T) { Spec: agentv1alpha1.AgentSpec{ Version: "7.10.0", Deployment: &agentv1alpha1.DeploymentSpec{}, + PolicyID: "a-policy", }, }), }, @@ -172,6 +203,7 @@ func Test_validatingWebhook_Handle(t *testing.T) { Spec: agentv1alpha1.AgentSpec{ Version: "7.10.0", Deployment: &agentv1alpha1.DeploymentSpec{}, + PolicyID: "a-policy", }, }), }, @@ -187,6 +219,7 @@ func Test_validatingWebhook_Handle(t *testing.T) { Spec: agentv1alpha1.AgentSpec{ Version: "7.10.0", Deployment: &agentv1alpha1.DeploymentSpec{}, + PolicyID: "a-policy", }, }), }, @@ -215,6 +248,7 @@ func Test_validatingWebhook_Handle(t *testing.T) { Spec: agentv1alpha1.AgentSpec{ Version: "7.10.1", Deployment: &agentv1alpha1.DeploymentSpec{}, + PolicyID: "a-policy", }, }), }, @@ -230,6 +264,7 @@ func Test_validatingWebhook_Handle(t *testing.T) { Spec: agentv1alpha1.AgentSpec{ Version: "7.10.0", Deployment: &agentv1alpha1.DeploymentSpec{}, + PolicyID: "a-policy", }, }), }, diff --git a/pkg/utils/test/events.go b/pkg/utils/test/events.go new file mode 100644 index 0000000000..1aef09bf74 --- /dev/null +++ b/pkg/utils/test/events.go @@ -0,0 +1,32 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package test + +import ( + "testing" + + "k8s.io/client-go/tools/record" +) + +// ReadAtMostEvents attempts to read at most minEventCount from a FakeRecorder. +// This functions assumes that all the events are available in recorder. 
+func ReadAtMostEvents(t *testing.T, minEventCount int, recorder *record.FakeRecorder) []string { + t.Helper() + if minEventCount == 0 { + return nil + } + gotEvents := make([]string, 0, minEventCount) + gotEventCount := 0 + for i := 0; i < minEventCount; i++ { + select { + case e := <-recorder.Events: + gotEvents = append(gotEvents, e) + gotEventCount++ + default: + t.Errorf("expected at least %d events, got %d", minEventCount, gotEventCount) + } + } + return gotEvents +} From f148a3c8d07265e458c2126daafd453707f7a4f5 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 28 Apr 2023 10:15:38 +0200 Subject: [PATCH 03/26] Update modules go.elastic.co/apm/* to v2.4.1 (#6739) * Update module go.elastic.co/apm/v2 to v2.4.1 * Update module go.elastic.co/apm/module/apmzap/v2 to v2.4.1 * Update module go.elastic.co/apm/module/apmhttp/v2 to v2.4.1 * Update module go.elastic.co/apm/module/apmelasticsearch/v2 to v2.4.1 --------- Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: Thibault Richard --- NOTICE.txt | 24 ++++++++++++------------ docs/reference/dependencies.asciidoc | 8 ++++---- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 4 files changed, 28 insertions(+), 28 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 84e9d0e5bd..78503ae581 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -2914,11 +2914,11 @@ SOFTWARE. -------------------------------------------------------------------------------- Module : go.elastic.co/apm/module/apmelasticsearch/v2 -Version : v2.3.0 -Time : 2023-03-31T03:30:00Z +Version : v2.4.1 +Time : 2023-04-27T13:39:08Z Licence : Apache-2.0 -Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmelasticsearch/v2@v2.3.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmelasticsearch/v2@v2.4.1/LICENSE: Apache License Version 2.0, January 2004 @@ -3125,11 +3125,11 @@ Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmelasti -------------------------------------------------------------------------------- Module : go.elastic.co/apm/module/apmhttp/v2 -Version : v2.3.0 -Time : 2023-03-31T03:30:00Z +Version : v2.4.1 +Time : 2023-04-27T13:39:08Z Licence : Apache-2.0 -Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmhttp/v2@v2.3.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmhttp/v2@v2.4.1/LICENSE: Apache License Version 2.0, January 2004 @@ -3336,11 +3336,11 @@ Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmhttp/v -------------------------------------------------------------------------------- Module : go.elastic.co/apm/module/apmzap/v2 -Version : v2.3.0 -Time : 2023-03-31T03:30:00Z +Version : v2.4.1 +Time : 2023-04-27T13:39:08Z Licence : Apache-2.0 -Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmzap/v2@v2.3.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmzap/v2@v2.4.1/LICENSE: Apache License Version 2.0, January 2004 @@ -3547,11 +3547,11 @@ Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmzap/v2 -------------------------------------------------------------------------------- Module : go.elastic.co/apm/v2 -Version : v2.3.0 -Time : 2023-03-31T03:30:00Z +Version : v2.4.1 +Time : 2023-04-27T13:39:08Z Licence : Apache-2.0 -Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/v2@v2.3.0/LICENSE: +Contents of probable licence 
file $GOMODCACHE/go.elastic.co/apm/v2@v2.4.1/LICENSE: Apache License Version 2.0, January 2004 diff --git a/docs/reference/dependencies.asciidoc b/docs/reference/dependencies.asciidoc index 1e648dd19b..89e9ce138d 100644 --- a/docs/reference/dependencies.asciidoc +++ b/docs/reference/dependencies.asciidoc @@ -44,10 +44,10 @@ This page lists the third-party dependencies used to build {n}. | link:https://github.com/spf13/pflag[$$github.com/spf13/pflag$$] | v1.0.5 | BSD-3-Clause | link:https://github.com/spf13/viper[$$github.com/spf13/viper$$] | v1.15.0 | MIT | link:https://github.com/stretchr/testify[$$github.com/stretchr/testify$$] | v1.8.2 | MIT -| link:https://go.elastic.co/apm/module/apmelasticsearch/v2[$$go.elastic.co/apm/module/apmelasticsearch/v2$$] | v2.3.0 | Apache-2.0 -| link:https://go.elastic.co/apm/module/apmhttp/v2[$$go.elastic.co/apm/module/apmhttp/v2$$] | v2.3.0 | Apache-2.0 -| link:https://go.elastic.co/apm/module/apmzap/v2[$$go.elastic.co/apm/module/apmzap/v2$$] | v2.3.0 | Apache-2.0 -| link:https://go.elastic.co/apm/v2[$$go.elastic.co/apm/v2$$] | v2.3.0 | Apache-2.0 +| link:https://go.elastic.co/apm/module/apmelasticsearch/v2[$$go.elastic.co/apm/module/apmelasticsearch/v2$$] | v2.4.1 | Apache-2.0 +| link:https://go.elastic.co/apm/module/apmhttp/v2[$$go.elastic.co/apm/module/apmhttp/v2$$] | v2.4.1 | Apache-2.0 +| link:https://go.elastic.co/apm/module/apmzap/v2[$$go.elastic.co/apm/module/apmzap/v2$$] | v2.4.1 | Apache-2.0 +| link:https://go.elastic.co/apm/v2[$$go.elastic.co/apm/v2$$] | v2.4.1 | Apache-2.0 | link:https://go.uber.org/automaxprocs[$$go.uber.org/automaxprocs$$] | v1.5.2 | MIT | link:https://go.uber.org/zap[$$go.uber.org/zap$$] | v1.24.0 | MIT | link:https://golang.org/x/crypto[$$golang.org/x/crypto$$] | v0.8.0 | BSD-3-Clause diff --git a/go.mod b/go.mod index af7bf6a62a..6d657fcf6e 100644 --- a/go.mod +++ b/go.mod @@ -28,10 +28,10 @@ require ( github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.15.0 github.com/stretchr/testify v1.8.2 - go.elastic.co/apm/module/apmelasticsearch/v2 v2.3.0 - go.elastic.co/apm/module/apmhttp/v2 v2.3.0 - go.elastic.co/apm/module/apmzap/v2 v2.3.0 - go.elastic.co/apm/v2 v2.3.0 + go.elastic.co/apm/module/apmelasticsearch/v2 v2.4.1 + go.elastic.co/apm/module/apmhttp/v2 v2.4.1 + go.elastic.co/apm/module/apmzap/v2 v2.4.1 + go.elastic.co/apm/v2 v2.4.1 go.uber.org/automaxprocs v1.5.2 go.uber.org/zap v1.24.0 golang.org/x/crypto v0.8.0 diff --git a/go.sum b/go.sum index bd07e87d48..f0beab2c2a 100644 --- a/go.sum +++ b/go.sum @@ -396,14 +396,14 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.elastic.co/apm/module/apmelasticsearch/v2 v2.3.0 h1:n9y7A3yfdQpQKdYFdMVGvWvaFEZY+uP/dKxK9cYkpsc= -go.elastic.co/apm/module/apmelasticsearch/v2 v2.3.0/go.mod h1:k+h3QWoowcH/fzsb++t3rTq11nR2utQTwTRqub4Cwns= -go.elastic.co/apm/module/apmhttp/v2 v2.3.0 h1:yGZyp26uJXUCfRTwvMmDt1d1jJrHgTBBncZfpYAxR8s= -go.elastic.co/apm/module/apmhttp/v2 v2.3.0/go.mod h1:JCszLIey4ndJGuUUu5FQjNOiTfaln1dqCqXnRcXVxVc= -go.elastic.co/apm/module/apmzap/v2 v2.3.0 h1:jwxdgAPeH4wRbWVSI3CYymlDNw6j983Up7W2lW3UmaM= -go.elastic.co/apm/module/apmzap/v2 v2.3.0/go.mod h1:x+2LUqavbVCYGcV7cxBnYsKKbOnbkIQwNpFof/qzS2M= -go.elastic.co/apm/v2 v2.3.0 h1:jsZQsGWyMyga6xRMcYhKtPvrr5en8wqbmJNmxltST/E= 
-go.elastic.co/apm/v2 v2.3.0/go.mod h1:HdwVuAeoJMmoqAZZBNN2YVzj3UVLebtqoRCCydyCP+Q= +go.elastic.co/apm/module/apmelasticsearch/v2 v2.4.1 h1:JcERpDUfaheW6k3Smmx1bfyFe4PcmFU6RMaBzsLwS8M= +go.elastic.co/apm/module/apmelasticsearch/v2 v2.4.1/go.mod h1:zurLQudUp3VSmmLEoYpIP2OkLzsDYOkTWulGZYA2bE8= +go.elastic.co/apm/module/apmhttp/v2 v2.4.1 h1:4jIuTNh14opEIwf+zMru1jGW3MuNj6vtI0dsx+6QMt0= +go.elastic.co/apm/module/apmhttp/v2 v2.4.1/go.mod h1:pJ2sd5b4YCX1YX9VzhvFO6AXpqCPiP3dP5U9qBD4s+8= +go.elastic.co/apm/module/apmzap/v2 v2.4.1 h1:vv5ZbrDvBrF0H7D+tuhSQ+NOrqbCH7d+hZNxrP/omRM= +go.elastic.co/apm/module/apmzap/v2 v2.4.1/go.mod h1:OZs/UznopxCb8Ax/P1va0AhReViu+61D84/4W5wj9Oc= +go.elastic.co/apm/v2 v2.4.1 h1:tMxAtHh5TXTYdFG0pTmmUOn/PTI3k/1T1ptb+3O+hYI= +go.elastic.co/apm/v2 v2.4.1/go.mod h1:HdwVuAeoJMmoqAZZBNN2YVzj3UVLebtqoRCCydyCP+Q= go.elastic.co/fastjson v1.1.0 h1:3MrGBWWVIxe/xvsbpghtkFoPciPhOCmjsR/HfwEeQR4= go.elastic.co/fastjson v1.1.0/go.mod h1:boNGISWMjQsUPy/t6yqt2/1Wx4YNPSe+mZjlyw9vKKI= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= From b6b50758127d24c1e67b62799711864fcd138582 Mon Sep 17 00:00:00 2001 From: Rob Bavey Date: Fri, 28 Apr 2023 10:13:51 -0400 Subject: [PATCH 04/26] Introduce the Logstash operator for ECK (#6732) This commit introduces a technical preview of the Logstash Operator for ECK The Logstash operator introduces a Logstash CRD: This operator provides support for: * Defining logstash.yml in config or configRef sections of the CRD * Integration with Elasticsearch clusters via the use of elasticsearchRefs, and environment variable substitution to introduce those elasticsearch references into logstash pipelines. * Definition of pipelines.yml in pipelines or pipelinesRef sections of the CRD with support for pipeline definition in volume mounts * Support for multiple pipeline and pipeline->pipeline configurations * Support for automatic pipeline reload in logstash pods when a pipeline change is detected without triggering a full restart of the pod. * Stack monitoring support via sending metrics and logs to a monitoring elasticsearch cluster via the use of monitoring.logs.elasticsearchRefs and monitoring.metrics.elasticsearchRefs * Support for defining multiple services for logstash plugins. Logstash nodes are created as StatefulSets - we expect in later versions of the logstash operator to support persistence in Logstash nodes, including persistent queues and dead letter queues. A work in progress PR includes documentation and recipes on how to use this logstash operator. 
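For illustration, a minimal Logstash resource built from the CRD schema introduced in this patch might look like the sketch below. The top-level fields (version, count, config, pipelines, and elasticsearchRefs with its clusterName alias) come directly from the CRD; the concrete pipeline contents, the stack version, and the PRODUCTION_ES_* environment variable names derived from the clusterName alias are illustrative assumptions and may differ from the shipped samples and recipes.

apiVersion: logstash.k8s.elastic.co/v1alpha1
kind: Logstash
metadata:
  name: quickstart
spec:
  version: 8.7.0
  count: 1
  elasticsearchRefs:
    # clusterName is the alias used to refer to this cluster from pipeline definitions
    - clusterName: production
      name: elasticsearch
  config:
    log.level: info
  pipelines:
    - pipeline.id: main
      # The PRODUCTION_ES_HOSTS / PRODUCTION_ES_USER / PRODUCTION_ES_PASSWORD variable
      # names below are assumed to follow from the "production" clusterName alias above.
      config.string: |
        input { beats { port => 5044 } }
        output {
          elasticsearch {
            hosts => [ "${PRODUCTION_ES_HOSTS}" ]
            user => "${PRODUCTION_ES_USER}"
            password => "${PRODUCTION_ES_PASSWORD}"
          }
        }
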
There are also samples in this PR located under config/samples/logstash/* Co-authored-by: Michael Morello Co-authored-by: Rob Bavey Co-authored-by: Kaise Cheng Co-authored-by: kaisecheng <69120390+kaisecheng@users.noreply.github.com> Co-authored-by: Michael Morello Co-authored-by: Thibault Richard Co-authored-by: Peter Brachwitz --- cmd/manager/main.go | 25 +- config/crds/v1/all-crds.yaml | 785 ++ config/crds/v1/bases/kustomization.yaml | 1 + .../logstash.k8s.elastic.co_logstashes.yaml | 8196 +++++++++++++++++ config/crds/v1/patches/kustomization.yaml | 8 + config/crds/v1/patches/logstash-patches.yaml | 7 + config/e2e/rbac.yaml | 13 + config/samples/logstash/logstash.yaml | 18 + config/samples/logstash/logstash_es.yaml | 36 + .../logstash/logstash_stackmonitor.yaml | 46 + config/samples/logstash/logstash_svc.yaml | 51 + config/webhook/manifests.yaml | 22 + .../eck-operator-crds/templates/all-crds.yaml | 791 ++ deploy/eck-operator/templates/_helpers.tpl | 13 + .../eck-operator/templates/cluster-roles.yaml | 6 + docs/reference/api-docs.asciidoc | 125 + hack/api-docs/config.yaml | 2 +- hack/operatorhub/config.yaml | 3 + hack/operatorhub/templates/csv.tpl | 13 +- pkg/apis/beat/v1beta1/validations.go | 2 +- pkg/apis/common/v1/association.go | 2 + pkg/apis/kibana/v1/webhook.go | 2 +- pkg/apis/logstash/v1alpha1/doc.go | 11 + .../logstash/v1alpha1/groupversion_info.go | 21 + pkg/apis/logstash/v1alpha1/labels.go | 17 + pkg/apis/logstash/v1alpha1/logstash_types.go | 381 + pkg/apis/logstash/v1alpha1/name.go | 41 + pkg/apis/logstash/v1alpha1/name_test.go | 81 + pkg/apis/logstash/v1alpha1/validations.go | 107 + .../logstash/v1alpha1/validations_test.go | 404 + pkg/apis/logstash/v1alpha1/webhook.go | 84 + pkg/apis/logstash/v1alpha1/webhook_test.go | 124 + .../v1alpha1/zz_generated.deepcopy.go | 254 + pkg/controller/agent/controller.go | 4 +- pkg/controller/apmserver/controller.go | 8 +- .../association/controller/logstash_es.go | 66 + .../controller/logstash_monitoring.go | 57 + pkg/controller/association/reconciler.go | 2 +- pkg/controller/beat/controller.go | 4 +- pkg/controller/common/configref.go | 26 +- pkg/controller/common/container/container.go | 1 + pkg/controller/common/reconciler/secret.go | 17 +- pkg/controller/common/scheme/scheme.go | 4 + .../stackmon/validations/validations.go | 14 +- .../stackmon/validations/validations_test.go | 2 +- pkg/controller/common/version/version.go | 1 + .../elasticsearch/certificates/reconcile.go | 2 +- .../elasticsearch/elasticsearch_controller.go | 4 +- .../elasticsearch/user/reconcile_test.go | 2 +- pkg/controller/elasticsearch/user/roles.go | 18 + .../elasticsearch/validation/validations.go | 2 +- .../enterprisesearch_controller.go | 4 +- pkg/controller/kibana/controller.go | 4 +- pkg/controller/kibana/driver.go | 6 +- pkg/controller/logstash/config.go | 83 + pkg/controller/logstash/config_test.go | 162 + pkg/controller/logstash/driver.go | 103 + pkg/controller/logstash/env.go | 80 + pkg/controller/logstash/env_test.go | 164 + pkg/controller/logstash/initcontainer.go | 75 + pkg/controller/logstash/labels.go | 36 + .../logstash/logstash_controller.go | 210 + .../logstash/logstash_controller_test.go | 465 + pkg/controller/logstash/network/ports.go | 10 + pkg/controller/logstash/pipeline.go | 88 + pkg/controller/logstash/pipeline_test.go | 124 + pkg/controller/logstash/pipelines/config.go | 135 + .../logstash/pipelines/config_test.go | 287 + pkg/controller/logstash/pipelines/ref.go | 37 + pkg/controller/logstash/pipelines/ref_test.go | 191 + 
pkg/controller/logstash/pod.go | 167 + pkg/controller/logstash/pod_test.go | 286 + pkg/controller/logstash/reconcile.go | 82 + pkg/controller/logstash/service.go | 97 + pkg/controller/logstash/service_test.go | 216 + pkg/controller/logstash/sset/sset.go | 96 + .../logstash/stackmon/beat_config.go | 61 + pkg/controller/logstash/stackmon/filebeat.yml | 19 + pkg/controller/logstash/stackmon/ls_config.go | 14 + .../logstash/stackmon/metricbeat.tpl.yml | 13 + pkg/controller/logstash/stackmon/sidecar.go | 110 + .../logstash/stackmon/sidecar_test.go | 179 + pkg/controller/logstash/volume.go | 95 + pkg/controller/logstash/volume_test.go | 89 + pkg/controller/maps/controller.go | 4 +- .../stackconfigpolicy/controller.go | 2 +- pkg/telemetry/fixtures.go | 8 + pkg/telemetry/telemetry.go | 42 +- pkg/telemetry/telemetry_test.go | 72 + pkg/utils/k8s/k8sutils.go | 4 +- test/e2e/beat/config_test.go | 2 +- test/e2e/es/stack_monitoring_test.go | 4 +- test/e2e/kb/stack_monitoring_test.go | 2 +- test/e2e/logstash/es_output_test.go | 80 + test/e2e/logstash/logstash_test.go | 125 + test/e2e/logstash/pipeline_test.go | 224 + test/e2e/logstash/stack_monitoring_test.go | 44 + test/e2e/samples_test.go | 27 + test/e2e/stack_test.go | 24 +- test/e2e/test/checks/monitoring.go | 4 +- test/e2e/test/helper/yaml.go | 26 +- test/e2e/test/k8s_client.go | 23 + test/e2e/test/logstash/builder.go | 236 + test/e2e/test/logstash/checks.go | 290 + test/e2e/test/logstash/http_client.go | 64 + test/e2e/test/logstash/steps.go | 149 + 106 files changed, 17106 insertions(+), 64 deletions(-) create mode 100644 config/crds/v1/bases/logstash.k8s.elastic.co_logstashes.yaml create mode 100644 config/crds/v1/patches/logstash-patches.yaml create mode 100644 config/samples/logstash/logstash.yaml create mode 100644 config/samples/logstash/logstash_es.yaml create mode 100644 config/samples/logstash/logstash_stackmonitor.yaml create mode 100644 config/samples/logstash/logstash_svc.yaml create mode 100644 pkg/apis/logstash/v1alpha1/doc.go create mode 100644 pkg/apis/logstash/v1alpha1/groupversion_info.go create mode 100644 pkg/apis/logstash/v1alpha1/labels.go create mode 100644 pkg/apis/logstash/v1alpha1/logstash_types.go create mode 100644 pkg/apis/logstash/v1alpha1/name.go create mode 100644 pkg/apis/logstash/v1alpha1/name_test.go create mode 100644 pkg/apis/logstash/v1alpha1/validations.go create mode 100644 pkg/apis/logstash/v1alpha1/validations_test.go create mode 100644 pkg/apis/logstash/v1alpha1/webhook.go create mode 100644 pkg/apis/logstash/v1alpha1/webhook_test.go create mode 100644 pkg/apis/logstash/v1alpha1/zz_generated.deepcopy.go create mode 100644 pkg/controller/association/controller/logstash_es.go create mode 100644 pkg/controller/association/controller/logstash_monitoring.go create mode 100644 pkg/controller/logstash/config.go create mode 100644 pkg/controller/logstash/config_test.go create mode 100644 pkg/controller/logstash/driver.go create mode 100644 pkg/controller/logstash/env.go create mode 100644 pkg/controller/logstash/env_test.go create mode 100644 pkg/controller/logstash/initcontainer.go create mode 100644 pkg/controller/logstash/labels.go create mode 100644 pkg/controller/logstash/logstash_controller.go create mode 100644 pkg/controller/logstash/logstash_controller_test.go create mode 100644 pkg/controller/logstash/network/ports.go create mode 100644 pkg/controller/logstash/pipeline.go create mode 100644 pkg/controller/logstash/pipeline_test.go create mode 100644 pkg/controller/logstash/pipelines/config.go create mode 
100644 pkg/controller/logstash/pipelines/config_test.go create mode 100644 pkg/controller/logstash/pipelines/ref.go create mode 100644 pkg/controller/logstash/pipelines/ref_test.go create mode 100644 pkg/controller/logstash/pod.go create mode 100644 pkg/controller/logstash/pod_test.go create mode 100644 pkg/controller/logstash/reconcile.go create mode 100644 pkg/controller/logstash/service.go create mode 100644 pkg/controller/logstash/service_test.go create mode 100644 pkg/controller/logstash/sset/sset.go create mode 100644 pkg/controller/logstash/stackmon/beat_config.go create mode 100644 pkg/controller/logstash/stackmon/filebeat.yml create mode 100644 pkg/controller/logstash/stackmon/ls_config.go create mode 100644 pkg/controller/logstash/stackmon/metricbeat.tpl.yml create mode 100644 pkg/controller/logstash/stackmon/sidecar.go create mode 100644 pkg/controller/logstash/stackmon/sidecar_test.go create mode 100644 pkg/controller/logstash/volume.go create mode 100644 pkg/controller/logstash/volume_test.go create mode 100644 test/e2e/logstash/es_output_test.go create mode 100644 test/e2e/logstash/logstash_test.go create mode 100644 test/e2e/logstash/pipeline_test.go create mode 100644 test/e2e/logstash/stack_monitoring_test.go create mode 100644 test/e2e/test/logstash/builder.go create mode 100644 test/e2e/test/logstash/checks.go create mode 100644 test/e2e/test/logstash/http_client.go create mode 100644 test/e2e/test/logstash/steps.go diff --git a/cmd/manager/main.go b/cmd/manager/main.go index eb8ae40dcc..2128533822 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -16,6 +16,9 @@ import ( "strings" "time" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/logstash" + "github.com/go-logr/logr" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -847,6 +850,7 @@ func registerControllers(mgr manager.Manager, params operator.Parameters, access {name: "Agent", registerFunc: agent.Add}, {name: "Maps", registerFunc: maps.Add}, {name: "StackConfigPolicy", registerFunc: stackconfigpolicy.Add}, + {name: "Logstash", registerFunc: logstash.Add}, } for _, c := range controllers { @@ -872,9 +876,11 @@ func registerControllers(mgr manager.Manager, params operator.Parameters, access {name: "AGENT-KB", registerFunc: associationctl.AddAgentKibana}, {name: "AGENT-FS", registerFunc: associationctl.AddAgentFleetServer}, {name: "EMS-ES", registerFunc: associationctl.AddMapsES}, + {name: "LOGSTASH-ES", registerFunc: associationctl.AddLogstashES}, {name: "ES-MONITORING", registerFunc: associationctl.AddEsMonitoring}, {name: "KB-MONITORING", registerFunc: associationctl.AddKbMonitoring}, {name: "BEAT-MONITORING", registerFunc: associationctl.AddBeatMonitoring}, + {name: "LOGSTASH-MONITORING", registerFunc: associationctl.AddLogstashMonitoring}, } for _, c := range assocControllers { @@ -913,6 +919,7 @@ func garbageCollectUsers(ctx context.Context, cfg *rest.Config, managedNamespace For(&beatv1beta1.BeatList{}, associationctl.BeatAssociationLabelNamespace, associationctl.BeatAssociationLabelName). For(&agentv1alpha1.AgentList{}, associationctl.AgentAssociationLabelNamespace, associationctl.AgentAssociationLabelName). For(&emsv1alpha1.ElasticMapsServerList{}, associationctl.MapsESAssociationLabelNamespace, associationctl.MapsESAssociationLabelName). + For(&logstashv1alpha1.LogstashList{}, associationctl.LogstashAssociationLabelNamespace, associationctl.LogstashAssociationLabelName). 
DoGarbageCollection(ctx) if err != nil { return fmt.Errorf("user garbage collector failed: %w", err) @@ -925,14 +932,15 @@ func garbageCollectSoftOwnedSecrets(ctx context.Context, k8sClient k8s.Client) { defer span.End() if err := reconciler.GarbageCollectAllSoftOwnedOrphanSecrets(ctx, k8sClient, map[string]client.Object{ - esv1.Kind: &esv1.Elasticsearch{}, - apmv1.Kind: &apmv1.ApmServer{}, - kbv1.Kind: &kbv1.Kibana{}, - entv1.Kind: &entv1.EnterpriseSearch{}, - beatv1beta1.Kind: &beatv1beta1.Beat{}, - agentv1alpha1.Kind: &agentv1alpha1.Agent{}, - emsv1alpha1.Kind: &emsv1alpha1.ElasticMapsServer{}, - policyv1alpha1.Kind: &policyv1alpha1.StackConfigPolicy{}, + esv1.Kind: &esv1.Elasticsearch{}, + apmv1.Kind: &apmv1.ApmServer{}, + kbv1.Kind: &kbv1.Kibana{}, + entv1.Kind: &entv1.EnterpriseSearch{}, + beatv1beta1.Kind: &beatv1beta1.Beat{}, + agentv1alpha1.Kind: &agentv1alpha1.Agent{}, + emsv1alpha1.Kind: &emsv1alpha1.ElasticMapsServer{}, + policyv1alpha1.Kind: &policyv1alpha1.StackConfigPolicy{}, + logstashv1alpha1.Kind: &logstashv1alpha1.Logstash{}, }); err != nil { log.Error(err, "Orphan secrets garbage collection failed, will be attempted again at next operator restart.") return @@ -973,6 +981,7 @@ func setupWebhook( &kbv1.Kibana{}, &kbv1beta1.Kibana{}, &emsv1alpha1.ElasticMapsServer{}, + &logstashv1alpha1.Logstash{}, } for _, obj := range webhookObjects { if err := commonwebhook.SetupValidatingWebhookWithConfig(&commonwebhook.Config{ diff --git a/config/crds/v1/all-crds.yaml b/config/crds/v1/all-crds.yaml index 6ce102bc14..09fb08551b 100644 --- a/config/crds/v1/all-crds.yaml +++ b/config/crds/v1/all-crds.yaml @@ -9039,6 +9039,791 @@ spec: --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.4 + name: logstashes.logstash.k8s.elastic.co +spec: + group: logstash.k8s.elastic.co + names: + categories: + - elastic + kind: Logstash + listKind: LogstashList + plural: logstashes + shortNames: + - ls + singular: logstash + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Available nodes + jsonPath: .status.availableNodes + name: available + type: integer + - description: Expected nodes + jsonPath: .status.expectedNodes + name: expected + type: integer + - jsonPath: .metadata.creationTimestamp + name: age + type: date + - description: Logstash version + jsonPath: .status.version + name: version + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Logstash is the Schema for the logstashes API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: LogstashSpec defines the desired state of Logstash + properties: + config: + description: Config holds the Logstash configuration. At most one + of [`Config`, `ConfigRef`] can be specified. 
+ type: object + x-kubernetes-preserve-unknown-fields: true + configRef: + description: ConfigRef contains a reference to an existing Kubernetes + Secret holding the Logstash configuration. Logstash settings must + be specified as yaml, under a single "logstash.yml" entry. At most + one of [`Config`, `ConfigRef`] can be specified. + properties: + secretName: + description: SecretName is the name of the secret. + type: string + type: object + count: + format: int32 + type: integer + elasticsearchRefs: + description: ElasticsearchRefs are references to Elasticsearch clusters + running in the same Kubernetes cluster. + items: + description: ElasticsearchCluster is a named reference to an Elasticsearch + cluster which can be used in a Logstash pipeline. + properties: + clusterName: + description: ClusterName is an alias for the cluster to be used + to refer to the Elasticsearch cluster in Logstash configuration + files, and will be used to identify "named clusters" in Logstash + minLength: 1 + type: string + name: + description: Name of an existing Kubernetes object corresponding + to an Elastic resource managed by ECK. + type: string + namespace: + description: Namespace of the Kubernetes object. If empty, defaults + to the current namespace. + type: string + secretName: + description: 'SecretName is the name of an existing Kubernetes + secret that contains connection information for associating + an Elastic resource not managed by the operator. The referenced + secret must contain the following: - `url`: the URL to reach + the Elastic resource - `username`: the username of the user + to be authenticated to the Elastic resource - `password`: + the password of the user to be authenticated to the Elastic + resource - `ca.crt`: the CA certificate in PEM format (optional). + This field cannot be used in combination with the other fields + name, namespace or serviceName.' + type: string + serviceName: + description: ServiceName is the name of an existing Kubernetes + service which is used to make requests to the referenced object. + It has to be in the same namespace as the referenced resource. + If left empty, the default HTTP service of the referenced + resource is used. + type: string + type: object + type: array + image: + description: Image is the Logstash Docker image to deploy. Version + and Type have to match the Logstash in the image. + type: string + monitoring: + description: Monitoring enables you to collect and ship log and monitoring + data of this Logstash. Metricbeat and Filebeat are deployed in the + same Pod as sidecars and each one sends data to one or two different + Elasticsearch monitoring clusters running in the same Kubernetes + cluster. + properties: + logs: + description: Logs holds references to Elasticsearch clusters which + receive log data from an associated resource. + properties: + elasticsearchRefs: + description: ElasticsearchRefs is a reference to a list of + monitoring Elasticsearch clusters running in the same Kubernetes + cluster. Due to existing limitations, only a single Elasticsearch + cluster is currently supported. + items: + description: ObjectSelector defines a reference to a Kubernetes + object which can be an Elastic resource managed by the + operator or a Secret describing an external Elastic resource + not managed by the operator. + properties: + name: + description: Name of an existing Kubernetes object corresponding + to an Elastic resource managed by ECK. + type: string + namespace: + description: Namespace of the Kubernetes object. 
If + empty, defaults to the current namespace. + type: string + secretName: + description: 'SecretName is the name of an existing + Kubernetes secret that contains connection information + for associating an Elastic resource not managed by + the operator. The referenced secret must contain the + following: - `url`: the URL to reach the Elastic resource + - `username`: the username of the user to be authenticated + to the Elastic resource - `password`: the password + of the user to be authenticated to the Elastic resource + - `ca.crt`: the CA certificate in PEM format (optional). + This field cannot be used in combination with the + other fields name, namespace or serviceName.' + type: string + serviceName: + description: ServiceName is the name of an existing + Kubernetes service which is used to make requests + to the referenced object. It has to be in the same + namespace as the referenced resource. If left empty, + the default HTTP service of the referenced resource + is used. + type: string + type: object + type: array + type: object + metrics: + description: Metrics holds references to Elasticsearch clusters + which receive monitoring data from this resource. + properties: + elasticsearchRefs: + description: ElasticsearchRefs is a reference to a list of + monitoring Elasticsearch clusters running in the same Kubernetes + cluster. Due to existing limitations, only a single Elasticsearch + cluster is currently supported. + items: + description: ObjectSelector defines a reference to a Kubernetes + object which can be an Elastic resource managed by the + operator or a Secret describing an external Elastic resource + not managed by the operator. + properties: + name: + description: Name of an existing Kubernetes object corresponding + to an Elastic resource managed by ECK. + type: string + namespace: + description: Namespace of the Kubernetes object. If + empty, defaults to the current namespace. + type: string + secretName: + description: 'SecretName is the name of an existing + Kubernetes secret that contains connection information + for associating an Elastic resource not managed by + the operator. The referenced secret must contain the + following: - `url`: the URL to reach the Elastic resource + - `username`: the username of the user to be authenticated + to the Elastic resource - `password`: the password + of the user to be authenticated to the Elastic resource + - `ca.crt`: the CA certificate in PEM format (optional). + This field cannot be used in combination with the + other fields name, namespace or serviceName.' + type: string + serviceName: + description: ServiceName is the name of an existing + Kubernetes service which is used to make requests + to the referenced object. It has to be in the same + namespace as the referenced resource. If left empty, + the default HTTP service of the referenced resource + is used. + type: string + type: object + type: array + type: object + type: object + pipelines: + description: Pipelines holds the Logstash Pipelines. At most one of + [`Pipelines`, `PipelinesRef`] can be specified. + items: + type: object + type: array + x-kubernetes-preserve-unknown-fields: true + pipelinesRef: + description: PipelinesRef contains a reference to an existing Kubernetes + Secret holding the Logstash Pipelines. Logstash pipelines must be + specified as yaml, under a single "pipelines.yml" entry. At most + one of [`Pipelines`, `PipelinesRef`] can be specified. + properties: + secretName: + description: SecretName is the name of the secret. 
+ type: string + type: object + podTemplate: + description: PodTemplate provides customisation options for the Logstash + pods. + type: object + x-kubernetes-preserve-unknown-fields: true + revisionHistoryLimit: + description: RevisionHistoryLimit is the number of revisions to retain + to allow rollback in the underlying StatefulSet. + format: int32 + type: integer + secureSettings: + description: SecureSettings is a list of references to Kubernetes + Secrets containing sensitive configuration options for the Logstash. + Secrets data can be then referenced in the Logstash config using + the Secret's keys or as specified in `Entries` field of each SecureSetting. + items: + description: SecretSource defines a data source based on a Kubernetes + Secret. + properties: + entries: + description: Entries define how to project each key-value pair + in the secret to filesystem paths. If not defined, all keys + will be projected to similarly named paths in the filesystem. + If defined, only the specified keys will be projected to the + corresponding paths. + items: + description: KeyToPath defines how to map a key in a Secret + object to a filesystem path. + properties: + key: + description: Key is the key contained in the secret. + type: string + path: + description: Path is the relative file path to map the + key to. Path must not be an absolute file path and must + not contain any ".." components. + type: string + required: + - key + type: object + type: array + secretName: + description: SecretName is the name of the secret. + type: string + required: + - secretName + type: object + type: array + serviceAccountName: + description: ServiceAccountName is used to check access from the current + resource to Elasticsearch resource in a different namespace. Can + only be used if ECK is enforcing RBAC on references. + type: string + services: + description: 'Services contains details of services that Logstash + should expose - similar to the HTTP layer configuration for the + rest of the stack, but also applicable for more use cases than the + metrics API, as logstash may need to be opened up for other services: + Beats, TCP, UDP, etc, inputs.' + items: + properties: + name: + type: string + service: + description: Service defines the template for the associated + Kubernetes Service object. + properties: + metadata: + description: ObjectMeta is the metadata of the service. + The name and namespace provided here are managed by ECK + and will be ignored. + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: Spec is the specification of the service. + properties: + allocateLoadBalancerNodePorts: + description: allocateLoadBalancerNodePorts defines if + NodePorts will be automatically allocated for services + with type LoadBalancer. Default is "true". It may + be set to "false" if the cluster load-balancer does + not rely on NodePorts. If the caller requests specific + NodePorts (by specifying a value), those requests + will be respected, regardless of this field. This + field may only be set for services with type LoadBalancer + and will be cleared if the type is changed to any + other type. + type: boolean + clusterIP: + description: 'clusterIP is the IP address of the service + and is usually assigned randomly. 
If an address is + specified manually, is in-range (as per system configuration), + and is not in use, it will be allocated to the service; + otherwise creation of the service will fail. This + field may not be changed through updates unless the + type field is also being changed to ExternalName (which + requires this field to be blank) or the type field + is being changed from ExternalName (in which case + this field may optionally be specified, as describe + above). Valid values are "None", empty string (""), + or a valid IP address. Setting this to "None" makes + a "headless service" (no virtual IP), which is useful + when direct endpoint connections are preferred and + proxying is not required. Only applies to types ClusterIP, + NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation + will fail. This field will be wiped when updating + a Service to type ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + clusterIPs: + description: "ClusterIPs is a list of IP addresses assigned + to this service, and are usually assigned randomly. + \ If an address is specified manually, is in-range + (as per system configuration), and is not in use, + it will be allocated to the service; otherwise creation + of the service will fail. This field may not be changed + through updates unless the type field is also being + changed to ExternalName (which requires this field + to be empty) or the type field is being changed from + ExternalName (in which case this field may optionally + be specified, as describe above). Valid values are + \"None\", empty string (\"\"), or a valid IP address. + \ Setting this to \"None\" makes a \"headless service\" + (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. + \ Only applies to types ClusterIP, NodePort, and LoadBalancer. + If this field is specified when creating a Service + of type ExternalName, creation will fail. This field + will be wiped when updating a Service to type ExternalName. + \ If this field is not specified, it will be initialized + from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP + have the same value. \n This field may hold a maximum + of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies + field. Both clusterIPs and ipFamilies are governed + by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies" + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: externalIPs is a list of IP addresses for + which nodes in the cluster will also accept traffic + for this service. These IPs are not managed by Kubernetes. The + user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external + load-balancers that are not part of the Kubernetes + system. + items: + type: string + type: array + externalName: + description: externalName is the external reference + that discovery mechanisms will return as an alias + for this service (e.g. a DNS CNAME record). No proxying + will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires + `type` to be "ExternalName". 
+ type: string + externalTrafficPolicy: + description: externalTrafficPolicy describes how nodes + distribute service traffic they receive on one of + the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", + the proxy will configure the service in a way that + assumes that external load balancers will take care + of balancing the service traffic between nodes, and + so each node will deliver traffic only to the node-local + endpoints of the service, without masquerading the + client source IP. (Traffic mistakenly sent to a node + with no endpoints will be dropped.) The default value, + "Cluster", uses the standard behavior of routing to + all endpoints evenly (possibly modified by topology + and other features). Note that traffic sent to an + External IP or LoadBalancer IP from within the cluster + will always get "Cluster" semantics, but clients sending + to a NodePort from within the cluster may need to + take traffic policy into account when picking a node. + type: string + healthCheckNodePort: + description: healthCheckNodePort specifies the healthcheck + nodePort for the service. This only applies when type + is set to LoadBalancer and externalTrafficPolicy is + set to Local. If a value is specified, is in-range, + and is not in use, it will be used. If not specified, + a value will be automatically allocated. External + systems (e.g. load-balancers) can use this port to + determine if a given node holds endpoints for this + service or not. If this field is specified when creating + a Service which does not need it, creation will fail. + This field will be wiped when updating a Service to + no longer need it (e.g. changing type). This field + cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: InternalTrafficPolicy describes how nodes + distribute service traffic they receive on the ClusterIP. + If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the + same node as the pod, dropping the traffic if there + are no local endpoints. The default value, "Cluster", + uses the standard behavior of routing to all endpoints + evenly (possibly modified by topology and other features). + type: string + ipFamilies: + description: "IPFamilies is a list of IP families (e.g. + IPv4, IPv6) assigned to this service. This field is + usually assigned automatically based on cluster configuration + and the ipFamilyPolicy field. If this field is specified + manually, the requested family is available in the + cluster, and ipFamilyPolicy allows it, it will be + used; otherwise creation of the service will fail. + This field is conditionally mutable: it allows for + adding or removing a secondary IP family, but it does + not allow changing the primary IP family of the Service. + Valid values are \"IPv4\" and \"IPv6\". This field + only applies to Services of types ClusterIP, NodePort, + and LoadBalancer, and does apply to \"headless\" services. + This field will be wiped when updating a Service to + type ExternalName. \n This field may hold a maximum + of two entries (dual-stack families, in either order). + \ These families must correspond to the values of + the clusterIPs field, if specified. Both clusterIPs + and ipFamilies are governed by the ipFamilyPolicy + field." + items: + description: IPFamily represents the IP Family (IPv4 + or IPv6). This type is used to express the family + of an IP expressed by a type (e.g. service.spec.ipFamilies). 
+ type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: IPFamilyPolicy represents the dual-stack-ness + requested or required by this Service. If there is + no value provided, then this field will be set to + SingleStack. Services can be "SingleStack" (a single + IP family), "PreferDualStack" (two IP families on + dual-stack configured clusters or a single IP family + on single-stack clusters), or "RequireDualStack" (two + IP families on dual-stack configured clusters, otherwise + fail). The ipFamilies and clusterIPs fields depend + on the value of this field. This field will be wiped + when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: loadBalancerClass is the class of the load + balancer implementation this Service belongs to. If + specified, the value of this field must be a label-style + identifier, with an optional prefix, e.g. "internal-vip" + or "example.com/internal-vip". Unprefixed names are + reserved for end-users. This field can only be set + when the Service type is 'LoadBalancer'. If not set, + the default load balancer implementation is used, + today this is typically done through the cloud provider + integration, but should apply for any default implementation. + If set, it is assumed that a load balancer implementation + is watching for Services with a matching class. Any + default load balancer implementation (e.g. cloud providers) + should ignore Services that set this field. This field + can only be set when creating or updating a Service + to type 'LoadBalancer'. Once set, it can not be changed. + This field will be wiped when a service is updated + to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: 'Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider + supports specifying the loadBalancerIP when a load + balancer is created. This field will be ignored if + the cloud-provider does not support the feature. Deprecated: + This field was under-specified and its meaning varies + across implementations, and it cannot support dual-stack. + As of Kubernetes v1.24, users are encouraged to use + implementation-specific annotations when available. + This field may be removed in a future API version.' + type: string + loadBalancerSourceRanges: + description: 'If specified and supported by the platform, + this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified + client IPs. This field will be ignored if the cloud-provider + does not support the feature." More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/' + items: + type: string + type: array + ports: + description: 'The list of ports that are exposed by + this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + items: + description: ServicePort contains information on service's + port. + properties: + appProtocol: + description: The application protocol for this + port. This field follows standard Kubernetes + label syntax. Un-prefixed names are reserved + for IANA standard service names (as per RFC-6335 + and https://www.iana.org/assignments/service-names). + Non-standard protocols should use prefixed names + such as mycompany.com/my-custom-protocol. + type: string + name: + description: The name of this port within the + service. This must be a DNS_LABEL. 
All ports + within a ServiceSpec must have unique names. + When considering the endpoints for a Service, + this must match the 'name' field in the EndpointPort. + Optional if only one ServicePort is defined + on this service. + type: string + nodePort: + description: 'The port on each node on which this + service is exposed when type is NodePort or + LoadBalancer. Usually assigned by the system. + If a value is specified, in-range, and not in + use it will be used, otherwise the operation + will fail. If not specified, a port will be + allocated if this Service requires one. If + this field is specified when creating a Service + which does not need it, creation will fail. + This field will be wiped when updating a Service + to no longer need it (e.g. changing type from + NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport' + format: int32 + type: integer + port: + description: The port that will be exposed by + this service. + format: int32 + type: integer + protocol: + default: TCP + description: The IP protocol for this port. Supports + "TCP", "UDP", and "SCTP". Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: 'Number or name of the port to access + on the pods targeted by the service. Number + must be in the range 1 to 65535. Name must be + an IANA_SVC_NAME. If this is a string, it will + be looked up as a named port in the target Pod''s + container ports. If this is not specified, the + value of the ''port'' field is used (an identity + map). This field is ignored for services with + clusterIP=None, and should be omitted or set + equal to the ''port'' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service' + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: publishNotReadyAddresses indicates that + any agent which deals with endpoints for this Service + should disregard any indications of ready/not-ready. + The primary use case for setting this field is for + a StatefulSet's Headless Service to propagate SRV + DNS records for its Pods for the purpose of peer discovery. + The Kubernetes controllers that generate Endpoints + and EndpointSlice resources for Services interpret + this to mean that all endpoints are considered "ready" + even if the Pods themselves are not. Agents which + consume only Kubernetes generated endpoints through + the Endpoints or EndpointSlice resources can safely + assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: 'Route service traffic to pods with label + keys and values matching this selector. If empty or + not present, the service is assumed to have an external + process managing its endpoints, which Kubernetes will + not modify. Only applies to types ClusterIP, NodePort, + and LoadBalancer. Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/' + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: 'Supports "ClientIP" and "None". Used to + maintain session affinity. Enable client IP based + session affinity. Must be ClientIP or None. Defaults + to None. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: timeoutSeconds specifies the seconds + of ClientIP type session sticky time. The + value must be >0 && <=86400(for 1 day) if + ServiceAffinity == "ClientIP". Default value + is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + type: + description: 'type determines how the Service is exposed. + Defaults to ClusterIP. Valid options are ExternalName, + ClusterIP, NodePort, and LoadBalancer. "ClusterIP" + allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector + or if that is not specified, by manual construction + of an Endpoints object or EndpointSlice objects. If + clusterIP is "None", no virtual IP is allocated and + the endpoints are published as a set of endpoints + rather than a virtual IP. "NodePort" builds on ClusterIP + and allocates a port on every node which routes to + the same endpoints as the clusterIP. "LoadBalancer" + builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to + the same endpoints as the clusterIP. "ExternalName" + aliases this service to the specified externalName. + Several other fields do not apply to ExternalName + services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types' + type: string + type: object + type: object + tls: + description: TLS defines options for configuring TLS for HTTP. + properties: + certificate: + description: "Certificate is a reference to a Kubernetes + secret that contains the certificate and private key for + enabling TLS. The referenced secret should contain the + following: \n - `ca.crt`: The certificate authority (optional). + - `tls.crt`: The certificate (or a chain). - `tls.key`: + The private key to the first certificate in the certificate + chain." + properties: + secretName: + description: SecretName is the name of the secret. + type: string + type: object + selfSignedCertificate: + description: SelfSignedCertificate allows configuring the + self-signed certificate generated by the operator. + properties: + disabled: + description: Disabled indicates that the provisioning + of the self-signed certifcate should be disabled. + type: boolean + subjectAltNames: + description: SubjectAlternativeNames is a list of SANs + to include in the generated HTTP TLS certificate. + items: + description: SubjectAlternativeName represents a SAN + entry in a x509 certificate. + properties: + dns: + description: DNS is the DNS name of the subject. + type: string + ip: + description: IP is the IP address of the subject. + type: string + type: object + type: array + type: object + type: object + type: object + type: array + version: + description: Version of the Logstash. + type: string + required: + - version + type: object + status: + description: LogstashStatus defines the observed state of Logstash + properties: + availableNodes: + format: int32 + type: integer + elasticsearchAssociationsStatus: + additionalProperties: + description: AssociationStatus is the status of an association resource. 
+ type: string + description: ElasticsearchAssociationStatus is the status of any auto-linking + to Elasticsearch clusters. + type: object + expectedNodes: + format: int32 + type: integer + monitoringAssociationStatus: + additionalProperties: + description: AssociationStatus is the status of an association resource. + type: string + description: MonitoringAssociationStatus is the status of any auto-linking + to monitoring Elasticsearch clusters. + type: object + observedGeneration: + description: ObservedGeneration is the most recent generation observed + for this Logstash instance. It corresponds to the metadata generation, + which is updated on mutation by the API Server. If the generation + observed in status diverges from the generation in metadata, the + Logstash controller has not yet processed the changes contained + in the Logstash specification. + format: int64 + type: integer + version: + description: 'Version of the stack resource currently running. During + version upgrades, multiple versions may run in parallel: this value + specifies the lowest version currently running.' + type: string + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.count + statusReplicasPath: .status.count + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.11.4 diff --git a/config/crds/v1/bases/kustomization.yaml b/config/crds/v1/bases/kustomization.yaml index 07d18db313..1c750971ee 100644 --- a/config/crds/v1/bases/kustomization.yaml +++ b/config/crds/v1/bases/kustomization.yaml @@ -8,3 +8,4 @@ resources: - agent.k8s.elastic.co_agents.yaml - maps.k8s.elastic.co_elasticmapsservers.yaml - stackconfigpolicy.k8s.elastic.co_stackconfigpolicies.yaml + - logstash.k8s.elastic.co_logstashes.yaml diff --git a/config/crds/v1/bases/logstash.k8s.elastic.co_logstashes.yaml b/config/crds/v1/bases/logstash.k8s.elastic.co_logstashes.yaml new file mode 100644 index 0000000000..506530e036 --- /dev/null +++ b/config/crds/v1/bases/logstash.k8s.elastic.co_logstashes.yaml @@ -0,0 +1,8196 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.4 + name: logstashes.logstash.k8s.elastic.co +spec: + group: logstash.k8s.elastic.co + names: + categories: + - elastic + kind: Logstash + listKind: LogstashList + plural: logstashes + shortNames: + - ls + singular: logstash + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Available nodes + jsonPath: .status.availableNodes + name: available + type: integer + - description: Expected nodes + jsonPath: .status.expectedNodes + name: expected + type: integer + - jsonPath: .metadata.creationTimestamp + name: age + type: date + - description: Logstash version + jsonPath: .status.version + name: version + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Logstash is the Schema for the logstashes API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: LogstashSpec defines the desired state of Logstash + properties: + config: + description: Config holds the Logstash configuration. At most one + of [`Config`, `ConfigRef`] can be specified. + type: object + x-kubernetes-preserve-unknown-fields: true + configRef: + description: ConfigRef contains a reference to an existing Kubernetes + Secret holding the Logstash configuration. Logstash settings must + be specified as yaml, under a single "logstash.yml" entry. At most + one of [`Config`, `ConfigRef`] can be specified. + properties: + secretName: + description: SecretName is the name of the secret. + type: string + type: object + count: + format: int32 + type: integer + elasticsearchRefs: + description: ElasticsearchRefs are references to Elasticsearch clusters + running in the same Kubernetes cluster. + items: + description: ElasticsearchCluster is a named reference to an Elasticsearch + cluster which can be used in a Logstash pipeline. + properties: + clusterName: + description: ClusterName is an alias for the cluster to be used + to refer to the Elasticsearch cluster in Logstash configuration + files, and will be used to identify "named clusters" in Logstash + minLength: 1 + type: string + name: + description: Name of an existing Kubernetes object corresponding + to an Elastic resource managed by ECK. + type: string + namespace: + description: Namespace of the Kubernetes object. If empty, defaults + to the current namespace. + type: string + secretName: + description: 'SecretName is the name of an existing Kubernetes + secret that contains connection information for associating + an Elastic resource not managed by the operator. The referenced + secret must contain the following: - `url`: the URL to reach + the Elastic resource - `username`: the username of the user + to be authenticated to the Elastic resource - `password`: + the password of the user to be authenticated to the Elastic + resource - `ca.crt`: the CA certificate in PEM format (optional). + This field cannot be used in combination with the other fields + name, namespace or serviceName.' + type: string + serviceName: + description: ServiceName is the name of an existing Kubernetes + service which is used to make requests to the referenced object. + It has to be in the same namespace as the referenced resource. + If left empty, the default HTTP service of the referenced + resource is used. + type: string + type: object + type: array + image: + description: Image is the Logstash Docker image to deploy. Version + and Type have to match the Logstash in the image. + type: string + monitoring: + description: Monitoring enables you to collect and ship log and monitoring + data of this Logstash. Metricbeat and Filebeat are deployed in the + same Pod as sidecars and each one sends data to one or two different + Elasticsearch monitoring clusters running in the same Kubernetes + cluster. + properties: + logs: + description: Logs holds references to Elasticsearch clusters which + receive log data from an associated resource. + properties: + elasticsearchRefs: + description: ElasticsearchRefs is a reference to a list of + monitoring Elasticsearch clusters running in the same Kubernetes + cluster. 
Due to existing limitations, only a single Elasticsearch + cluster is currently supported. + items: + description: ObjectSelector defines a reference to a Kubernetes + object which can be an Elastic resource managed by the + operator or a Secret describing an external Elastic resource + not managed by the operator. + properties: + name: + description: Name of an existing Kubernetes object corresponding + to an Elastic resource managed by ECK. + type: string + namespace: + description: Namespace of the Kubernetes object. If + empty, defaults to the current namespace. + type: string + secretName: + description: 'SecretName is the name of an existing + Kubernetes secret that contains connection information + for associating an Elastic resource not managed by + the operator. The referenced secret must contain the + following: - `url`: the URL to reach the Elastic resource + - `username`: the username of the user to be authenticated + to the Elastic resource - `password`: the password + of the user to be authenticated to the Elastic resource + - `ca.crt`: the CA certificate in PEM format (optional). + This field cannot be used in combination with the + other fields name, namespace or serviceName.' + type: string + serviceName: + description: ServiceName is the name of an existing + Kubernetes service which is used to make requests + to the referenced object. It has to be in the same + namespace as the referenced resource. If left empty, + the default HTTP service of the referenced resource + is used. + type: string + type: object + type: array + type: object + metrics: + description: Metrics holds references to Elasticsearch clusters + which receive monitoring data from this resource. + properties: + elasticsearchRefs: + description: ElasticsearchRefs is a reference to a list of + monitoring Elasticsearch clusters running in the same Kubernetes + cluster. Due to existing limitations, only a single Elasticsearch + cluster is currently supported. + items: + description: ObjectSelector defines a reference to a Kubernetes + object which can be an Elastic resource managed by the + operator or a Secret describing an external Elastic resource + not managed by the operator. + properties: + name: + description: Name of an existing Kubernetes object corresponding + to an Elastic resource managed by ECK. + type: string + namespace: + description: Namespace of the Kubernetes object. If + empty, defaults to the current namespace. + type: string + secretName: + description: 'SecretName is the name of an existing + Kubernetes secret that contains connection information + for associating an Elastic resource not managed by + the operator. The referenced secret must contain the + following: - `url`: the URL to reach the Elastic resource + - `username`: the username of the user to be authenticated + to the Elastic resource - `password`: the password + of the user to be authenticated to the Elastic resource + - `ca.crt`: the CA certificate in PEM format (optional). + This field cannot be used in combination with the + other fields name, namespace or serviceName.' + type: string + serviceName: + description: ServiceName is the name of an existing + Kubernetes service which is used to make requests + to the referenced object. It has to be in the same + namespace as the referenced resource. If left empty, + the default HTTP service of the referenced resource + is used. + type: string + type: object + type: array + type: object + type: object + pipelines: + description: Pipelines holds the Logstash Pipelines. 
At most one of + [`Pipelines`, `PipelinesRef`] can be specified. + items: + type: object + type: array + x-kubernetes-preserve-unknown-fields: true + pipelinesRef: + description: PipelinesRef contains a reference to an existing Kubernetes + Secret holding the Logstash Pipelines. Logstash pipelines must be + specified as yaml, under a single "pipelines.yml" entry. At most + one of [`Pipelines`, `PipelinesRef`] can be specified. + properties: + secretName: + description: SecretName is the name of the secret. + type: string + type: object + podTemplate: + description: PodTemplate provides customisation options for the Logstash + pods. + properties: + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: 'Specification of the desired behavior of the pod. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + activeDeadlineSeconds: + description: Optional duration in seconds the pod may be active + on the node relative to StartTime before the system will + actively try to mark it failed and kill associated containers. + Value must be a positive integer. + format: int64 + type: integer + affinity: + description: If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. The + node that is most preferred is the one with the + greatest sum of weights, i.e. for each node that + meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements + of this field and adding "weight" to the sum if + the node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term + matches all objects with implicit weight 0 (i.e. + it's a no-op). A null preferred scheduling term + matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. 
+ If the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, the + pod will not be scheduled onto the node. If the + affinity requirements specified by this field cease + to be met at some point during pod execution (e.g. + due to an update), the system may or may not try + to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: A null or empty node selector term + matches no objects. The requirements of them + are ANDed. The TopologySelectorTerm type implements + a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. 
+ properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, the + values array must have a single + element, which will be interpreted + as an integer. This array is replaced + during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. The + node that is most preferred is the one with the + greatest sum of weights, i.e. for each node that + meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements + of this field and adding "weight" to the sum if + the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum + are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of + resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set + of namespaces that the term applies to. + The term is applied to the union of the + namespaces selected by this field and + the ones listed in the namespaces field. + null selector and null or empty namespaces + list means "this pod's namespace". An + empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in the + range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, the + pod will not be scheduled onto the node. If the + affinity requirements specified by this field cease + to be met at some point during pod execution (e.g. + due to a pod label update), the system may or may + not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, + i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those + matching the labelSelector relative to the given + namespace(s)) that this pod should be co-located + (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node + whose value of the label with key + matches that of any node on which a pod of the + set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by + this field and the ones listed in the namespaces + field. null selector and null or empty namespaces + list means "this pod's namespace". An empty + selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected + by namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the + pods matching the labelSelector in the specified + namespaces, where co-located is defined as + running on a node whose value of the label + with key topologyKey matches that of any node + on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the anti-affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. The + node that is most preferred is the one with the + greatest sum of weights, i.e. for each node that + meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity + expressions, etc.), compute a sum by iterating through + the elements of this field and adding "weight" to + the sum if the node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest sum + are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of + resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set + of namespaces that the term applies to. + The term is applied to the union of the + namespaces selected by this field and + the ones listed in the namespaces field. + null selector and null or empty namespaces + list means "this pod's namespace". An + empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in the + range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified + by this field are not met at scheduling time, the + pod will not be scheduled onto the node. If the + anti-affinity requirements specified by this field + cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may + or may not try to eventually evict the pod from + its node. When there are multiple elements, the + lists of nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those + matching the labelSelector relative to the given + namespace(s)) that this pod should be co-located + (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node + whose value of the label with key + matches that of any node on which a pod of the + set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by + this field and the ones listed in the namespaces + field. null selector and null or empty namespaces + list means "this pod's namespace". An empty + selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected + by namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the + pods matching the labelSelector in the specified + namespaces, where co-located is defined as + running on a node whose value of the label + with key topologyKey matches that of any node + on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + description: AutomountServiceAccountToken indicates whether + a service account token should be automatically mounted. + type: boolean + containers: + description: List of containers belonging to the pod. Containers + cannot currently be added or removed. There must be at least + one container in a Pod. Cannot be updated. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container + image''s CMD is used if this is not provided. Variable + references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the + reference in the input string will be unchanged. Double + $$ are reduced to a single $, which allows for escaping + the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce + the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the + variable exists or not. Cannot be updated. More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within + a shell. The container image''s ENTRYPOINT is used + if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If + a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in + the container. Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) + are expanded using the previously defined environment + variables in the container and any service environment + variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. 
"$$(VAR_NAME)" + will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults + to "".' + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: + supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, + status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, + requests.cpu, requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment + variables in the container. The keys defined within + a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is + starting. When a key exists in multiple sources, the + value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will + take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. 
More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config + management to default or override container images + in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, + IfNotPresent. Defaults to Always if :latest tag is + specified, or IfNotPresent otherwise. Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should + take in response to container lifecycle events. Cannot + be updated. + properties: + postStart: + description: 'PostStart is called immediately after + a container is created. If the handler fails, + the container is terminated and restarted according + to its restart policy. Other management of the + container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line + to execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside + a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, + you need to explicitly call out to that + shell. Exit status of 0 is treated as + live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward + compatibility. There are no validation of + this field and lifecycle hooks will fail in + runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before + a container is terminated due to an API request + or management event such as liveness/startup probe + failure, preemption, resource contention, etc. + The handler is not called if the container crashes + or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the + container will eventually terminate within the + Pod''s termination grace period (unless delayed + by finalizers). Other management of the container + blocks until the hook completes or until the termination + grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line + to execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside + a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, + you need to explicitly call out to that + shell. Exit status of 0 is treated as + live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward + compatibility. There are no validation of + this field and lifecycle hooks will fail in + runtime when tcp handler is specified. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. + Container will be restarted if the probe fails. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') in + the container's filesystem. The command is + simply exec'd, it is not run inside a shell, + so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is + treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to + the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is + 1. 
+ format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having + failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. + Not specifying a port here DOES NOT prevent that port + from being exposed. Any port which is listening on + the default "0.0.0.0" address inside a container will + be accessible from the network. Modifying this array + with strategic merge patch may corrupt the data. For + more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's + IP address. This must be a valid port number, + 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: Number of port to expose on the host. + If specified, this must be a valid port number, + 0 < x < 65536. If HostNetwork is specified, + this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in + a pod must have a unique name. Name for the + port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, + or SCTP. Defaults to "TCP". 
+ type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if + the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') in + the container's filesystem. The command is + simply exec'd, it is not run inside a shell, + so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is + treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to + the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is + 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having + failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. + Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. \n This is an alpha field and + requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can + only be set for containers." + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one + entry in pod.spec.resourceClaims of the + Pod where this field is used. It makes that + resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options + the container should be run with. If set, the fields + of SecurityContext override the equivalent fields + of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls + whether a process can gain more privileges than + its parent process. This bool directly controls + if the no_new_privs flag will be set on the container + process. AllowPrivilegeEscalation is true always + when the container is: 1) run as Privileged 2) + has CAP_SYS_ADMIN Note that this field cannot + be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running + containers. Defaults to the default set of capabilities + granted by the container runtime. Note that this + field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent + to root on the host. Defaults to false. Note that + this field cannot be set when spec.os.name is + windows. + type: boolean + procMount: + description: procMount denotes the type of proc + mount to use for the containers. The default is + DefaultProcMount which uses the container runtime + defaults for readonly paths and masked paths. + This requires the ProcMountType feature flag to + be enabled. Note that this field cannot be set + when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only + root filesystem. Default is false. Note that this + field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the + container process. Uses runtime default if unset. + May also be set in PodSecurityContext. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run + as a non-root user. If true, the Kubelet will + validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no + such validation will be performed. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in + SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the + container process. Defaults to user specified + in image metadata if unspecified. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in + SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. 
+ format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to + the container. If unspecified, the container runtime + will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this + container. If seccomp options are provided at + both the pod & container level, the container + options override the pod options. Note that this + field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile + defined in a file on the node should be used. + The profile must be preconfigured on the node + to work. Must be a descending path, relative + to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp + profile will be applied. Valid options are: + \n Localhost - a profile defined in a file + on the node should be used. RuntimeDefault + - the container runtime default profile should + be used. Unconfined - no profile should be + applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied + to all containers. If unspecified, the options + from the PodSecurityContext will be used. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the + GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container + should be run as a 'Host Process' container. + This field is alpha-level and will only be + honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the + feature flag will result in errors when validating + the Pod. All of a Pod's containers must have + the same effective HostProcess value (it is + not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, + if HostProcess is true then HostNetwork must + also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run + the entrypoint of the container process. Defaults + to the user specified in image metadata if + unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. 
+ type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has + successfully initialized. If specified, no other probes + are executed until this completes successfully. If + this probe fails, the Pod will be restarted, just + as if the livenessProbe failed. This can be used to + provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time + to load data or warm a cache, than during steady-state + operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') in + the container's filesystem. The command is + simply exec'd, it is not run inside a shell, + so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is + treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to + the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is + 1. 
+ format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having + failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate + a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will + always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close + the stdin channel after it has been opened by a single + attach. When stdin is true the stdin stream will remain + open across multiple attach sessions. If stdinOnce + is set to true, stdin is opened on container start, + is empty until the first client attaches to stdin, + and then remains open and accepts data until the client + disconnects, at which time stdin is closed and remains + closed until the container is restarted. If this flag + is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which + the container''s termination message will be written + is mounted into the container''s filesystem. Message + written is intended to be brief final status, such + as an assertion failure message. Will be truncated + by the node if greater than 4096 bytes. The total + message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot + be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should + be populated. File will use the contents of terminationMessagePath + to populate the container status message on both success + and failure. 
FallbackToLogsOnError will use the last + chunk of container log output if the termination message + file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, + whichever is smaller. Defaults to File. Cannot be + updated. + type: string + tty: + description: Whether this container should allocate + a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's + filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: Path within the container at which + the volume should be mounted. Must not contain + ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts + are propagated from the host to container and + the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults to + false. + type: boolean + subPath: + description: Path within the volume from which + the container's volume should be mounted. Defaults + to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + Behaves similarly to SubPath but environment + variable references $(VAR_NAME) are expanded + using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath + are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which + might be configured in the container image. Cannot + be updated. + type: string + required: + - name + type: object + type: array + dnsConfig: + description: Specifies the DNS parameters of a pod. Parameters + specified here will be merged to the generated DNS configuration + based on DNSPolicy. + properties: + nameservers: + description: A list of DNS name server IP addresses. This + will be appended to the base nameservers generated from + DNSPolicy. Duplicated nameservers will be removed. + items: + type: string + type: array + options: + description: A list of DNS resolver options. This will + be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options + given in Options will override those that appear in + the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: A list of DNS search domains for host-name + lookup. 
This will be appended to the base search paths + generated from DNSPolicy. Duplicated search paths will + be removed. + items: + type: string + type: array + type: object + dnsPolicy: + description: Set DNS policy for the pod. Defaults to "ClusterFirst". + Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', + 'Default' or 'None'. DNS parameters given in DNSConfig will + be merged with the policy selected with DNSPolicy. To have + DNS options set along with hostNetwork, you have to specify + DNS policy explicitly to 'ClusterFirstWithHostNet'. + type: string + enableServiceLinks: + description: 'EnableServiceLinks indicates whether information + about services should be injected into pod''s environment + variables, matching the syntax of Docker links. Optional: + Defaults to true.' + type: boolean + ephemeralContainers: + description: List of ephemeral containers run in this pod. + Ephemeral containers may be run in an existing pod to perform + user-initiated actions such as debugging. This list cannot + be specified when creating a pod, and it cannot be modified + by updating the pod spec. In order to add an ephemeral container + to an existing pod, use the pod's ephemeralcontainers subresource. + items: + description: "An EphemeralContainer is a temporary container + that you may add to an existing Pod for user-initiated + activities such as debugging. Ephemeral containers have + no resource or scheduling guarantees, and they will not + be restarted when they exit or when a Pod is removed or + restarted. The kubelet may evict a Pod if an ephemeral + container causes the Pod to exceed its resource allocation. + \n To add an ephemeral container, use the ephemeralcontainers + subresource of an existing Pod. Ephemeral containers may + not be removed or restarted." + properties: + args: + description: 'Arguments to the entrypoint. The image''s + CMD is used if this is not provided. Variable references + $(VAR_NAME) are expanded using the container''s environment. + If a variable cannot be resolved, the reference in + the input string will be unchanged. Double $$ are + reduced to a single $, which allows for escaping the + $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce + the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the + variable exists or not. Cannot be updated. More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within + a shell. The image''s ENTRYPOINT is used if this is + not provided. Variable references $(VAR_NAME) are + expanded using the container''s environment. If a + variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in + the container. Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. 
+ properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) + are expanded using the previously defined environment + variables in the container and any service environment + variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" + will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults + to "".' + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: + supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, + status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, + requests.cpu, requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment + variables in the container. The keys defined within + a source must be a C_IDENTIFIER. 
All invalid keys + will be reported as an event when the container is + starting. When a key exists in multiple sources, the + value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will + take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, + IfNotPresent. Defaults to Always if :latest tag is + specified, or IfNotPresent otherwise. Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Lifecycle is not allowed for ephemeral + containers. + properties: + postStart: + description: 'PostStart is called immediately after + a container is created. If the handler fails, + the container is terminated and restarted according + to its restart policy. Other management of the + container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line + to execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside + a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, + you need to explicitly call out to that + shell. Exit status of 0 is treated as + live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward + compatibility. There are no validation of + this field and lifecycle hooks will fail in + runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before + a container is terminated due to an API request + or management event such as liveness/startup probe + failure, preemption, resource contention, etc. + The handler is not called if the container crashes + or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the + container will eventually terminate within the + Pod''s termination grace period (unless delayed + by finalizers). Other management of the container + blocks until the hook completes or until the termination + grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line + to execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside + a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, + you need to explicitly call out to that + shell. Exit status of 0 is treated as + live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. 
TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward + compatibility. There are no validation of + this field and lifecycle hooks will fail in + runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') in + the container's filesystem. The command is + simply exec'd, it is not run inside a shell, + so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is + treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to + the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is + 1. 
+ format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having + failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the ephemeral container specified + as a DNS_LABEL. This name must be unique among all + containers, init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for ephemeral containers. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's + IP address. This must be a valid port number, + 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: Number of port to expose on the host. + If specified, this must be a valid port number, + 0 < x < 65536. If HostNetwork is specified, + this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in + a pod must have a unique name. Name for the + port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, + or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') in + the container's filesystem. The command is + simply exec'd, it is not run inside a shell, + so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is + treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to + the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is + 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having + failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: Resources are not allowed for ephemeral + containers. Ephemeral containers use spare resources + already allocated to the pod. + properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. \n This is an alpha field and + requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can + only be set for containers." + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one + entry in pod.spec.resourceClaims of the + Pod where this field is used. It makes that + resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'Optional: SecurityContext defines the + security options the ephemeral container should be + run with. If set, the fields of SecurityContext override + the equivalent fields of PodSecurityContext.' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls + whether a process can gain more privileges than + its parent process. 
This bool directly controls + if the no_new_privs flag will be set on the container + process. AllowPrivilegeEscalation is true always + when the container is: 1) run as Privileged 2) + has CAP_SYS_ADMIN Note that this field cannot + be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running + containers. Defaults to the default set of capabilities + granted by the container runtime. Note that this + field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent + to root on the host. Defaults to false. Note that + this field cannot be set when spec.os.name is + windows. + type: boolean + procMount: + description: procMount denotes the type of proc + mount to use for the containers. The default is + DefaultProcMount which uses the container runtime + defaults for readonly paths and masked paths. + This requires the ProcMountType feature flag to + be enabled. Note that this field cannot be set + when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only + root filesystem. Default is false. Note that this + field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the + container process. Uses runtime default if unset. + May also be set in PodSecurityContext. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run + as a non-root user. If true, the Kubelet will + validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no + such validation will be performed. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in + SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the + container process. Defaults to user specified + in image metadata if unspecified. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in + SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to + the container. If unspecified, the container runtime + will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. 
+ type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this + container. If seccomp options are provided at + both the pod & container level, the container + options override the pod options. Note that this + field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile + defined in a file on the node should be used. + The profile must be preconfigured on the node + to work. Must be a descending path, relative + to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp + profile will be applied. Valid options are: + \n Localhost - a profile defined in a file + on the node should be used. RuntimeDefault + - the container runtime default profile should + be used. Unconfined - no profile should be + applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied + to all containers. If unspecified, the options + from the PodSecurityContext will be used. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the + GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container + should be run as a 'Host Process' container. + This field is alpha-level and will only be + honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the + feature flag will result in errors when validating + the Pod. All of a Pod's containers must have + the same effective HostProcess value (it is + not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, + if HostProcess is true then HostNetwork must + also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run + the entrypoint of the container process. Defaults + to the user specified in image metadata if + unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. + type: string + type: object + type: object + startupProbe: + description: Probes are not allowed for ephemeral containers. + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') in + the container's filesystem. The command is + simply exec'd, it is not run inside a shell, + so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is + treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to + the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is + 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having + failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. 
Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate + a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will + always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close + the stdin channel after it has been opened by a single + attach. When stdin is true the stdin stream will remain + open across multiple attach sessions. If stdinOnce + is set to true, stdin is opened on container start, + is empty until the first client attaches to stdin, + and then remains open and accepts data until the client + disconnects, at which time stdin is closed and remains + closed until the container is restarted. If this flag + is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + targetContainerName: + description: "If set, the name of the container from + PodSpec that this ephemeral container targets. The + ephemeral container will be run in the namespaces + (IPC, PID, etc) of this container. If not set then + the ephemeral container uses the namespaces configured + in the Pod spec. \n The container runtime must implement + support for this feature. If the runtime does not + support namespace targeting then the result of setting + this field is undefined." + type: string + terminationMessagePath: + description: 'Optional: Path at which the file to which + the container''s termination message will be written + is mounted into the container''s filesystem. Message + written is intended to be brief final status, such + as an assertion failure message. Will be truncated + by the node if greater than 4096 bytes. The total + message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot + be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should + be populated. File will use the contents of terminationMessagePath + to populate the container status message on both success + and failure. FallbackToLogsOnError will use the last + chunk of container log output if the termination message + file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, + whichever is smaller. Defaults to File. Cannot be + updated. + type: string + tty: + description: Whether this container should allocate + a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. 
+ type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's + filesystem. Subpath mounts are not allowed for ephemeral + containers. Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: Path within the container at which + the volume should be mounted. Must not contain + ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts + are propagated from the host to container and + the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults to + false. + type: boolean + subPath: + description: Path within the volume from which + the container's volume should be mounted. Defaults + to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + Behaves similarly to SubPath but environment + variable references $(VAR_NAME) are expanded + using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath + are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which + might be configured in the container image. Cannot + be updated. + type: string + required: + - name + type: object + type: array + hostAliases: + description: HostAliases is an optional list of hosts and + IPs that will be injected into the pod's hosts file if specified. + This is only valid for non-hostNetwork pods. + items: + description: HostAlias holds the mapping between IP and + hostnames that will be injected as an entry in the pod's + hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostIPC: + description: 'Use the host''s ipc namespace. Optional: Default + to false.' + type: boolean + hostNetwork: + description: Host networking requested for this pod. Use the + host's network namespace. If this option is set, the ports + that will be used must be specified. Default to false. + type: boolean + hostPID: + description: 'Use the host''s pid namespace. Optional: Default + to false.' + type: boolean + hostUsers: + description: 'Use the host''s user namespace. Optional: Default + to true. If set to true or not present, the pod will be + run in the host user namespace, useful for when the pod + needs a feature only available to the host user namespace, + such as loading a kernel module with CAP_SYS_MODULE. When + set to false, a new userns is created for the pod. Setting + false is useful for mitigating container breakout vulnerabilities + even allowing users to run their containers as root without + actually having root privileges on the host. This field + is alpha-level and is only honored by servers that enable + the UserNamespacesSupport feature.' 
+ type: boolean + hostname: + description: Specifies the hostname of the Pod If not specified, + the pod's hostname will be set to a system-defined value. + type: string + imagePullSecrets: + description: 'ImagePullSecrets is an optional list of references + to secrets in the same namespace to use for pulling any + of the images used by this PodSpec. If specified, these + secrets will be passed to individual puller implementations + for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod' + items: + description: LocalObjectReference contains enough information + to let you locate the referenced object inside the same + namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + type: array + initContainers: + description: 'List of initialization containers belonging + to the pod. Init containers are executed in order prior + to containers being started. If any init container fails, + the pod is considered to have failed and is handled according + to its restartPolicy. The name for an init container or + normal container must be unique among all containers. Init + containers may not have Lifecycle actions, Readiness probes, + Liveness probes, or Startup probes. The resourceRequirements + of an init container are taken into account during scheduling + by finding the highest request/limit for each resource type, + and then using the max of of that value or the sum of the + normal containers. Limits are applied to init containers + in a similar fashion. Init containers cannot currently be + added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/' + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container + image''s CMD is used if this is not provided. Variable + references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the + reference in the input string will be unchanged. Double + $$ are reduced to a single $, which allows for escaping + the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce + the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the + variable exists or not. Cannot be updated. More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within + a shell. The container image''s ENTRYPOINT is used + if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If + a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in + the container. Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) + are expanded using the previously defined environment + variables in the container and any service environment + variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" + will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults + to "".' + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: + supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, + status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, + requests.cpu, requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment + variables in the container. The keys defined within + a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is + starting. When a key exists in multiple sources, the + value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will + take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config + management to default or override container images + in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, + IfNotPresent. Defaults to Always if :latest tag is + specified, or IfNotPresent otherwise. Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should + take in response to container lifecycle events. Cannot + be updated. + properties: + postStart: + description: 'PostStart is called immediately after + a container is created. If the handler fails, + the container is terminated and restarted according + to its restart policy. Other management of the + container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line + to execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside + a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, + you need to explicitly call out to that + shell. Exit status of 0 is treated as + live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. 
+ properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward + compatibility. There are no validation of + this field and lifecycle hooks will fail in + runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before + a container is terminated due to an API request + or management event such as liveness/startup probe + failure, preemption, resource contention, etc. + The handler is not called if the container crashes + or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the + container will eventually terminate within the + Pod''s termination grace period (unless delayed + by finalizers). Other management of the container + blocks until the hook completes or until the termination + grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line + to execute inside the container, the working + directory for the command is root ('/') + in the container's filesystem. The command + is simply exec'd, it is not run inside + a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, + you need to explicitly call out to that + shell. Exit status of 0 is treated as + live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting + to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward + compatibility. There are no validation of + this field and lifecycle hooks will fail in + runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port + to access on the container. Number must + be in the range 1 to 65535. Name must + be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. + Container will be restarted if the probe fails. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') in + the container's filesystem. The command is + simply exec'd, it is not run inside a shell, + so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is + treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to + the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is + 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having + failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. + Not specifying a port here DOES NOT prevent that port + from being exposed. Any port which is listening on + the default "0.0.0.0" address inside a container will + be accessible from the network. Modifying this array + with strategic merge patch may corrupt the data. 
For + more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's + IP address. This must be a valid port number, + 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: Number of port to expose on the host. + If specified, this must be a valid port number, + 0 < x < 65536. If HostNetwork is specified, + this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in + a pod must have a unique name. Name for the + port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, + or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if + the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') in + the container's filesystem. The command is + simply exec'd, it is not run inside a shell, + so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is + treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to + the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is + 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having + failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. + Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. \n This is an alpha field and + requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can + only be set for containers." + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one + entry in pod.spec.resourceClaims of the + Pod where this field is used. It makes that + resource available inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options + the container should be run with. If set, the fields + of SecurityContext override the equivalent fields + of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls + whether a process can gain more privileges than + its parent process. This bool directly controls + if the no_new_privs flag will be set on the container + process. AllowPrivilegeEscalation is true always + when the container is: 1) run as Privileged 2) + has CAP_SYS_ADMIN Note that this field cannot + be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running + containers. Defaults to the default set of capabilities + granted by the container runtime. Note that this + field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent + to root on the host. Defaults to false. Note that + this field cannot be set when spec.os.name is + windows. + type: boolean + procMount: + description: procMount denotes the type of proc + mount to use for the containers. The default is + DefaultProcMount which uses the container runtime + defaults for readonly paths and masked paths. + This requires the ProcMountType feature flag to + be enabled. Note that this field cannot be set + when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only + root filesystem. Default is false. Note that this + field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the + container process. Uses runtime default if unset. + May also be set in PodSecurityContext. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. 
+ Note that this field cannot be set when spec.os.name + is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run + as a non-root user. If true, the Kubelet will + validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no + such validation will be performed. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in + SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the + container process. Defaults to user specified + in image metadata if unspecified. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in + SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to + the container. If unspecified, the container runtime + will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this + container. If seccomp options are provided at + both the pod & container level, the container + options override the pod options. Note that this + field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile + defined in a file on the node should be used. + The profile must be preconfigured on the node + to work. Must be a descending path, relative + to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp + profile will be applied. Valid options are: + \n Localhost - a profile defined in a file + on the node should be used. RuntimeDefault + - the container runtime default profile should + be used. Unconfined - no profile should be + applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied + to all containers. If unspecified, the options + from the PodSecurityContext will be used. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the + GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential + spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. 
+ type: string + hostProcess: + description: HostProcess determines if a container + should be run as a 'Host Process' container. + This field is alpha-level and will only be + honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the + feature flag will result in errors when validating + the Pod. All of a Pod's containers must have + the same effective HostProcess value (it is + not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, + if HostProcess is true then HostNetwork must + also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run + the entrypoint of the container process. Defaults + to the user specified in image metadata if + unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes + precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has + successfully initialized. If specified, no other probes + are executed until this completes successfully. If + this probe fails, the Pod will be restarted, just + as if the livenessProbe failed. This can be used to + provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time + to load data or warm a cache, than during steady-state + operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to + execute inside the container, the working + directory for the command is root ('/') in + the container's filesystem. The command is + simply exec'd, it is not run inside a shell, + so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is + treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the + probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is a beta field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service + to place in the gRPC HealthCheckRequest (see + https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: Host name to connect to, defaults + to the pod IP. You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to + the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container + has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the + probe. Default to 10 seconds. Minimum value is + 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the + probe to be considered successful after having + failed. Defaults to 1. Must be 1 for liveness + and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod + needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after + the processes running in the pod are sent a termination + signal and the time when the processes are forcibly + halted with a kill signal. Set this value longer + than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides + the value provided by the pod spec. Value must + be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity + to shut down). This is a beta field and requires + enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds + is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the + probe times out. Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate + a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will + always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close + the stdin channel after it has been opened by a single + attach. When stdin is true the stdin stream will remain + open across multiple attach sessions. 
If stdinOnce + is set to true, stdin is opened on container start, + is empty until the first client attaches to stdin, + and then remains open and accepts data until the client + disconnects, at which time stdin is closed and remains + closed until the container is restarted. If this flag + is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which + the container''s termination message will be written + is mounted into the container''s filesystem. Message + written is intended to be brief final status, such + as an assertion failure message. Will be truncated + by the node if greater than 4096 bytes. The total + message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot + be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should + be populated. File will use the contents of terminationMessagePath + to populate the container status message on both success + and failure. FallbackToLogsOnError will use the last + chunk of container log output if the termination message + file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, + whichever is smaller. Defaults to File. Cannot be + updated. + type: string + tty: + description: Whether this container should allocate + a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's + filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: Path within the container at which + the volume should be mounted. Must not contain + ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts + are propagated from the host to container and + the other way around. When not set, MountPropagationNone + is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write + otherwise (false or unspecified). Defaults to + false. + type: boolean + subPath: + description: Path within the volume from which + the container's volume should be mounted. Defaults + to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + Behaves similarly to SubPath but environment + variable references $(VAR_NAME) are expanded + using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath + are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. 
If not specified, + the container runtime's default will be used, which + might be configured in the container image. Cannot + be updated. + type: string + required: + - name + type: object + type: array + nodeName: + description: NodeName is a request to schedule this pod onto + a specific node. If it is non-empty, the scheduler simply + schedules this pod onto that node, assuming that it fits + resource requirements. + type: string + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true + for the pod to fit on a node. Selector which must match + a node''s labels for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + x-kubernetes-map-type: atomic + os: + description: "Specifies the OS of the containers in the pod. + Some pod and container fields are restricted if this is + set. \n If the OS field is set to linux, the following fields + must be unset: -securityContext.windowsOptions \n If the + OS field is set to windows, following fields must be unset: + - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls + - spec.shareProcessNamespace - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups + - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup" + properties: + name: + description: 'Name is the name of the operating system. + The currently supported values are linux and windows. + Additional value may be defined in future and can be + one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values and + treat unrecognized values in this field as os: null' + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Overhead represents the resource overhead associated + with running a pod for a given RuntimeClass. This field + will be autopopulated at admission time by the RuntimeClass + admission controller. If the RuntimeClass admission controller + is enabled, overhead must not be set in Pod create requests. + The RuntimeClass admission controller will reject Pod create + requests which have the overhead already set. If RuntimeClass + is configured and selected in the PodSpec, Overhead will + be set to the value defined in the corresponding RuntimeClass, + otherwise it will remain unset and treated as zero. More + info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md' + type: object + preemptionPolicy: + description: PreemptionPolicy is the Policy for preempting + pods with lower priority. One of Never, PreemptLowerPriority. 
+ Defaults to PreemptLowerPriority if unset. + type: string + priority: + description: The priority value. Various system components + use this field to find the priority of the pod. When Priority + Admission Controller is enabled, it prevents users from + setting this field. The admission controller populates this + field from PriorityClassName. The higher the value, the + higher the priority. + format: int32 + type: integer + priorityClassName: + description: If specified, indicates the pod's priority. "system-node-critical" + and "system-cluster-critical" are two special keywords which + indicate the highest priorities with the former being the + highest priority. Any other name must be defined by creating + a PriorityClass object with that name. If not specified, + the pod priority will be default or zero if there is no + default. + type: string + readinessGates: + description: 'If specified, all readiness gates will be evaluated + for pod readiness. A pod is ready when all its containers + are ready AND all conditions specified in the readiness + gates have status equal to "True" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates' + items: + description: PodReadinessGate contains the reference to + a pod condition + properties: + conditionType: + description: ConditionType refers to a condition in + the pod's condition list with matching type. + type: string + required: + - conditionType + type: object + type: array + resourceClaims: + description: "ResourceClaims defines which ResourceClaims + must be allocated and reserved before the Pod is allowed + to start. The resources will be made available to those + containers which consume them by name. \n This is an alpha + field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: PodResourceClaim references exactly one ResourceClaim + through a ClaimSource. It adds a name to it that uniquely + identifies the ResourceClaim inside the Pod. Containers + that need access to the ResourceClaim reference it with + this name. + properties: + name: + description: Name uniquely identifies this resource + claim inside the pod. This must be a DNS_LABEL. + type: string + source: + description: Source describes where to find the ResourceClaim. + properties: + resourceClaimName: + description: ResourceClaimName is the name of a + ResourceClaim object in the same namespace as + this pod. + type: string + resourceClaimTemplateName: + description: "ResourceClaimTemplateName is the name + of a ResourceClaimTemplate object in the same + namespace as this pod. \n The template will be + used to create a new ResourceClaim, which will + be bound to this pod. When this pod is deleted, + the ResourceClaim will also be deleted. The name + of the ResourceClaim will be -, where is the PodResourceClaim.Name. + Pod validation will reject the pod if the concatenated + name is not valid for a ResourceClaim (e.g. too + long). \n An existing ResourceClaim with that + name that is not owned by the pod will not be + used for the pod to avoid using an unrelated resource + by mistake. Scheduling and pod startup are then + blocked until the unrelated ResourceClaim is removed. + \n This field is immutable and no changes will + be made to the corresponding ResourceClaim by + the control plane after creating the ResourceClaim." 
+ type: string + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + restartPolicy: + description: 'Restart policy for all containers within the + pod. One of Always, OnFailure, Never. Default to Always. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' + type: string + runtimeClassName: + description: 'RuntimeClassName refers to a RuntimeClass object + in the node.k8s.io group, which should be used to run this + pod. If no RuntimeClass resource matches the named class, + the pod will not be run. If unset or empty, the "legacy" + RuntimeClass will be used, which is an implicit class with + an empty definition that uses the default runtime handler. + More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class' + type: string + schedulerName: + description: If specified, the pod will be dispatched by specified + scheduler. If not specified, the pod will be dispatched + by default scheduler. + type: string + schedulingGates: + description: "SchedulingGates is an opaque list of values + that if specified will block scheduling the pod. More info: + \ https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness. + \n This is an alpha-level feature enabled by PodSchedulingReadiness + feature gate." + items: + description: PodSchedulingGate is associated to a Pod to + guard its scheduling. + properties: + name: + description: Name of the scheduling gate. Each scheduling + gate must have a unique name field. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + description: 'SecurityContext holds pod-level security attributes + and common container settings. Optional: Defaults to empty. See + type description for default values of each field.' + properties: + fsGroup: + description: "A special supplemental group that applies + to all containers in a pod. Some volume types allow + the Kubelet to change the ownership of that volume to + be owned by the pod: \n 1. The owning GID will be the + FSGroup 2. The setgid bit is set (new files created + in the volume will be owned by FSGroup) 3. The permission + bits are OR'd with rw-rw---- \n If unset, the Kubelet + will not modify the ownership and permissions of any + volume. Note that this field cannot be set when spec.os.name + is windows." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of + changing ownership and permission of the volume before + being exposed inside Pod. This field will only apply + to volume types which support fsGroup based ownership(and + permissions). It will have no effect on ephemeral volume + types such as: secret, configmaps and emptydir. Valid + values are "OnRootMismatch" and "Always". If not specified, + "Always" is used. Note that this field cannot be set + when spec.os.name is windows.' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be + set in SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. Note that this + field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as + a non-root user. 
If true, the Kubelet will validate + the image at runtime to ensure that it does not run + as UID 0 (root) and fail to start the container if it + does. If unset or false, no such validation will be + performed. May also be set in SecurityContext. If set + in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence + for that container. Note that this field cannot be set + when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all + containers. If unspecified, the container runtime will + allocate a random SELinux context for each container. May + also be set in SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. Note that this + field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by the containers + in this pod. Note that this field cannot be set when + spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile + defined in a file on the node should be used. The + profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's + configured seccomp profile location. Must only be + set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp + profile will be applied. Valid options are: \n Localhost + - a profile defined in a file on the node should + be used. RuntimeDefault - the container runtime + default profile should be used. Unconfined - no + profile should be applied." + type: string + required: + - type + type: object + supplementalGroups: + description: A list of groups applied to the first process + run in each container, in addition to the container's + primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container + process. If unspecified, no additional groups are added + to any container. Note that group memberships defined + in the container image for the uid of the container + process are still effective, even if they are not included + in this list. Note that this field cannot be set when + spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls + used for the pod. Pods with unsupported sysctls (by + the container runtime) might fail to launch. Note that + this field cannot be set when spec.os.name is windows. 
+ items:
+ description: Sysctl defines a kernel parameter to be
+ set
+ properties:
+ name:
+ description: Name of a property to set
+ type: string
+ value:
+ description: Value of a property to set
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ windowsOptions:
+ description: The Windows specific settings applied to
+ all containers. If unspecified, the options within a
+ container's SecurityContext will be used. If set in
+ both SecurityContext and PodSecurityContext, the value
+ specified in SecurityContext takes precedence. Note
+ that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: GMSACredentialSpec is where the GMSA
+ admission webhook (https://github.com/kubernetes-sigs/windows-gmsa)
+ inlines the contents of the GMSA credential spec
+ named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name of
+ the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: HostProcess determines if a container
+ should be run as a 'Host Process' container. This
+ field is alpha-level and will only be honored by
+ components that enable the WindowsHostProcessContainers
+ feature flag. Setting this field without the feature
+ flag will result in errors when validating the Pod.
+ All of a Pod's containers must have the same effective
+ HostProcess value (it is not allowed to have a mix
+ of HostProcess containers and non-HostProcess containers). In
+ addition, if HostProcess is true then HostNetwork
+ must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: The UserName in Windows to run the entrypoint
+ of the container process. Defaults to the user specified
+ in image metadata if unspecified. May also be set
+ in PodSecurityContext. If set in both SecurityContext
+ and PodSecurityContext, the value specified in SecurityContext
+ takes precedence.
+ type: string
+ type: object
+ type: object
+ serviceAccount:
+ description: 'DeprecatedServiceAccount is a depreciated alias
+ for ServiceAccountName. Deprecated: Use serviceAccountName
+ instead.'
+ type: string
+ serviceAccountName:
+ description: 'ServiceAccountName is the name of the ServiceAccount
+ to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/'
+ type: string
+ setHostnameAsFQDN:
+ description: If true the pod's hostname will be configured
+ as the pod's FQDN, rather than the leaf name (the default).
+ In Linux containers, this means setting the FQDN in the
+ hostname field of the kernel (the nodename field of struct
+ utsname). In Windows containers, this means setting the
+ registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters
+ to FQDN. If a pod does not have FQDN, this has no effect.
+ Default to false.
+ type: boolean
+ shareProcessNamespace:
+ description: 'Share a single process namespace between all
+ of the containers in a pod. When this is set containers
+ will be able to view and signal processes from other containers
+ in the same pod, and the first process in each container
+ will not be assigned PID 1. HostPID and ShareProcessNamespace
+ cannot both be set. Optional: Default to false.'
+ type: boolean
+ subdomain:
+ description: If specified, the fully qualified Pod hostname
+ will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ If not specified, the pod will not have a domainname
+ at all.
+ type: string
+ terminationGracePeriodSeconds:
+ description: Optional duration in seconds the pod needs to
+ terminate gracefully. May be decreased in delete request.
+ Value must be non-negative integer. The value zero indicates
+ stop immediately via the kill signal (no opportunity to
+ shut down). If this value is nil, the default grace period
+ will be used instead. The grace period is the duration in
+ seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are
+ forcibly halted with a kill signal. Set this value longer
+ than the expected cleanup time for your process. Defaults
+ to 30 seconds.
+ format: int64
+ type: integer
+ tolerations:
+ description: If specified, the pod's tolerations.
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule and
+ NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration
+ applies to. Empty means match all taint keys. If the
+ key is empty, operator must be Exists; this combination
+ means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship
+ to the value. Valid operators are Exists and Equal.
+ Defaults to Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate all taints of
+ a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period
+ of time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the
+ taint forever (do not evict). Zero and negative values
+ will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration
+ matches to. If the operator is Exists, the value should
+ be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topologySpreadConstraints:
+ description: TopologySpreadConstraints describes how a group
+ of pods ought to spread across topology domains. Scheduler
+ will schedule pods in a way which abides by the constraints.
+ All topologySpreadConstraints are ANDed.
+ items:
+ description: TopologySpreadConstraint specifies how to spread
+ matching pods among the given topology.
+ properties:
+ labelSelector:
+ description: LabelSelector is used to find matching
+ pods. Pods that match this label selector are counted
+ to determine the number of pods in their corresponding
+ topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a
+ selector that contains values, a key, and an
+ operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are
+ In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string
+ values. If the operator is In or NotIn,
+ the values array must be non-empty.
If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: MatchLabelKeys is a set of pod label keys + to select the pods over which spreading will be calculated. + The keys are used to lookup values from the incoming + pod labels, those key-value labels are ANDed with + labelSelector to select the group of existing pods + over which spreading will be calculated for the incoming + pod. Keys that don't exist in the incoming pod labels + will be ignored. A null or empty list means only match + against labelSelector. + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which + pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the + number of matching pods in the target topology and + the global minimum. The global minimum is the minimum + number of matching pods in an eligible domain or zero + if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to + 1, and pods with the same labelSelector spread as + 2/2/1: In this case, the global minimum is 1. | zone1 + | zone2 | zone3 | | P P | P P | P | - if MaxSkew + is 1, incoming pod can only be scheduled to zone3 + to become 2/2/2; scheduling it onto zone1(zone2) would + make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto + any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies + that satisfy it. It''s a required field. Default value + is 1 and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number + of eligible domains. When the number of eligible domains + with matching topology keys is less than minDomains, + Pod Topology Spread treats \"global minimum\" as 0, + and then the calculation of Skew is performed. And + when the number of eligible domains with matching + topology keys equals or greater than minDomains, this + value has no effect on scheduling. As a result, when + the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to + those domains. If value is nil, the constraint behaves + as if MinDomains is equal to 1. Valid values are integers + greater than 0. When value is not nil, WhenUnsatisfiable + must be DoNotSchedule. \n For example, in a 3-zone + cluster, MaxSkew is set to 2, MinDomains is set to + 5 and pods with the same labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | | P P | P P | P P | + The number of domains is less than 5(MinDomains), + so \"global minimum\" is treated as 0. In this situation, + new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod + is scheduled to any of the three zones, it will violate + MaxSkew. 
\n This is a beta field and requires the
+ MinDomainsInPodTopologySpread feature gate to be enabled
+ (enabled by default)."
+ format: int32
+ type: integer
+ nodeAffinityPolicy:
+ description: "NodeAffinityPolicy indicates how we will
+ treat Pod's nodeAffinity/nodeSelector when calculating
+ pod topology spread skew. Options are: - Honor: only
+ nodes matching nodeAffinity/nodeSelector are included
+ in the calculations. - Ignore: nodeAffinity/nodeSelector
+ are ignored. All nodes are included in the calculations.
+ \n If this value is nil, the behavior is equivalent
+ to the Honor policy. This is a beta-level feature
+ default enabled by the NodeInclusionPolicyInPodTopologySpread
+ feature flag."
+ type: string
+ nodeTaintsPolicy:
+ description: "NodeTaintsPolicy indicates how we will
+ treat node taints when calculating pod topology spread
+ skew. Options are: - Honor: nodes without taints,
+ along with tainted nodes for which the incoming pod
+ has a toleration, are included. - Ignore: node taints
+ are ignored. All nodes are included. \n If this value
+ is nil, the behavior is equivalent to the Ignore policy.
+ This is a beta-level feature default enabled by the
+ NodeInclusionPolicyInPodTopologySpread feature flag."
+ type: string
+ topologyKey:
+ description: TopologyKey is the key of node labels.
+ Nodes that have a label with this key and identical
+ values are considered to be in the same topology.
+ We consider each <key, value> as a "bucket", and try
+ to put balanced number of pods into each bucket. We
+ define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose
+ nodes meet the requirements of nodeAffinityPolicy
+ and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname",
+ each Node is a domain of that topology. And, if TopologyKey
+ is "topology.kubernetes.io/zone", each zone is a domain
+ of that topology. It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: 'WhenUnsatisfiable indicates how to deal
+ with a pod if it doesn''t satisfy the spread constraint.
+ - DoNotSchedule (default) tells the scheduler not
+ to schedule it. - ScheduleAnyway tells the scheduler
+ to schedule the pod in any location, but giving higher
+ precedence to topologies that would help reduce the
+ skew. A constraint is considered "Unsatisfiable" for
+ an incoming pod if and only if every possible node
+ assignment for that pod would violate "MaxSkew" on
+ some topology. For example, in a 3-zone cluster, MaxSkew
+ is set to 1, and pods with the same labelSelector
+ spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P
+ | P | P | If WhenUnsatisfiable is set to DoNotSchedule,
+ incoming pod can only be scheduled to zone2(zone3)
+ to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3)
+ satisfies MaxSkew(1). In other words, the cluster
+ can still be imbalanced, but scheduler won''t make
+ it *more* imbalanced. It''s a required field.'
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - topologyKey
+ - whenUnsatisfiable
+ x-kubernetes-list-type: map
+ volumes:
+ description: 'List of volumes that can be mounted by containers
+ belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes'
+ items:
+ description: Volume represents a named volume in a pod that
+ may be accessed by any container in the pod.
+ properties: + awsElasticBlockStore: + description: 'awsElasticBlockStore represents an AWS + Disk resource that is attached to a kubelet''s host + machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'fsType is the filesystem type of the + volume that you want to mount. Tip: Ensure that + the filesystem type is supported by the host operating + system. Examples: "ext4", "xfs", "ntfs". Implicitly + inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem + from compromising the machine' + type: string + partition: + description: 'partition is the partition in the + volume that you want to mount. If omitted, the + default is to mount by volume name. Examples: + For volume /dev/sda1, you specify the partition + as "1". Similarly, the volume partition for /dev/sda + is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'readOnly value true will force the + readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'volumeID is unique ID of the persistent + disk resource in AWS (Amazon EBS volume). More + info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk + mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + description: fsType is Filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs", "ntfs". Implicitly + inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure + managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + description: readOnly Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting + in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: readOnly defaults to false (read/write). + ReadOnly here will force the ReadOnly setting + in VolumeMounts. 
+ type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the + host that shares a pod's lifetime + properties: + monitors: + description: 'monitors is Required: Monitors is + a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: 'readOnly is Optional: Defaults to + false (read/write). ReadOnly here will force the + ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile + is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is + reference to the authentication secret for User, + default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: 'user is optional: User is the rados + user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'cinder represents a cinder volume attached + and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'readOnly defaults to false (read/write). + ReadOnly here will force the ReadOnly setting + in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'secretRef is optional: points to a + secret object containing parameters used to connect + to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: 'volumeID used to identify the volume + in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits + used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or + a decimal value between 0 and 511. 
YAML accepts + both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories + within the path are not affected by this setting. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced ConfigMap + will be projected into the volume as a file whose + name is the key and content is the value. If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. + If a key is specified which is not present in + the ConfigMap, the volume setup will error unless + it is marked optional. Paths must be relative + and may not contain the '..' path or start with + '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. Must + be an octal value between 0000 and 0777 + or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON + requires decimal values for mode bits. If + not specified, the volume defaultMode will + be used. This might be in conflict with + other options that affect the file mode, + like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be an + absolute path. May not contain the path + element '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta feature). + properties: + driver: + description: driver is the name of the CSI driver + that handles this volume. Consult with your admin + for the correct name as registered in the cluster. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", + "ntfs". If not provided, the empty value is passed + to the associated CSI driver which will determine + the default filesystem to apply. + type: string + nodePublishSecretRef: + description: nodePublishSecretRef is a reference + to the secret object containing sensitive information + to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no + secret is required. If the secret object contains + more than one secret, all secret references are + passed. + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: readOnly specifies a read-only configuration + for the volume. Defaults to false (read/write). 
+ type: boolean + volumeAttributes: + additionalProperties: + type: string + description: volumeAttributes stores driver-specific + properties that are passed to the CSI driver. + Consult your driver's documentation for supported + values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about + the pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created + files by default. Must be a Optional: mode bits + used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories + within the path are not affected by this setting. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name + and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: 'Optional: mode bits used to + set permissions on this file, must be an + octal value between 0000 and 0777 or a decimal + value between 0 and 511. YAML accepts both + octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This + might be in conflict with other options + that affect the file mode, like fsGroup, + and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of + the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'emptyDir represents a temporary directory + that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'medium represents what type of storage + medium should back this directory. 
The default
+ is "" which means to use the node''s default medium.
+ Must be an empty string (default) or Memory. More
+ info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ description: 'sizeLimit is the total amount of local
+ storage required for this EmptyDir volume. The
+ size limit is also applicable for memory medium.
+ The maximum usage on memory medium EmptyDir would
+ be the minimum value between the SizeLimit specified
+ here and the sum of memory limits of all containers
+ in a pod. The default is nil which means that
+ the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir'
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ description: "ephemeral represents a volume that is
+ handled by a cluster storage driver. The volume's
+ lifecycle is tied to the pod that defines it - it
+ will be created before the pod starts, and deleted
+ when the pod is removed. \n Use this if: a) the volume
+ is only needed while the pod runs, b) features of
+ normal volumes like restoring from snapshot or capacity
+ tracking are needed, c) the storage driver is specified
+ through a storage class, and d) the storage driver
+ supports dynamic volume provisioning through a PersistentVolumeClaim
+ (see EphemeralVolumeSource for more information on
+ the connection between this volume type and PersistentVolumeClaim).
+ \n Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the
+ lifecycle of an individual pod. \n Use CSI for light-weight
+ local ephemeral volumes if the CSI driver is meant
+ to be used that way - see the documentation of the
+ driver for more information. \n A pod can use both
+ types of ephemeral volumes and persistent volumes
+ at the same time."
+ properties:
+ volumeClaimTemplate:
+ description: "Will be used to create a stand-alone
+ PVC to provision the volume. The pod in which
+ this EphemeralVolumeSource is embedded will be
+ the owner of the PVC, i.e. the PVC will be deleted
+ together with the pod. The name of the PVC will
+ be `<pod name>-<volume name>` where `<volume name>`
+ is the name from the `PodSpec.Volumes` array entry.
+ Pod validation will reject the pod if the concatenated
+ name is not valid for a PVC (for example, too
+ long). \n An existing PVC with that name that
+ is not owned by the pod will *not* be used for
+ the pod to avoid using an unrelated volume by
+ mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created
+ PVC is meant to be used by the pod, the PVC has
+ to updated with an owner reference to the pod
+ once the pod exists. Normally this should not
+ be necessary, but it may be useful when manually
+ reconstructing a broken cluster. \n This field
+ is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created. \n Required,
+ must not be nil."
+ properties:
+ metadata:
+ description: May contain labels and annotations
+ that will be copied into the PVC when creating
+ it. No other fields are allowed and will be
+ rejected during validation.
+ properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into + the PVC that gets created from this template. + The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired + access modes the volume should have. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used + to specify either: * An existing VolumeSnapshot + object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller + can support the specified data source, + it will create a new volume based on the + contents of the specified data source. + When the AnyVolumeDataSource feature gate + is enabled, dataSource contents will be + copied to dataSourceRef, and dataSourceRef + contents will be copied to dataSource + when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef + will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for + the resource being referenced. If + APIGroup is not specified, the specified + Kind must be in the core API group. + For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: 'dataSourceRef specifies the + object from which to populate the volume + with data, if a non-empty volume is desired. + This may be any object from a non-empty + API group (non core object) or a PersistentVolumeClaim + object. When this field is specified, + volume binding will only succeed if the + type of the specified object matches some + installed volume populator or dynamic + provisioner. This field will replace the + functionality of the dataSource field + and as such if both fields are non-empty, + they must have the same value. For backwards + compatibility, when namespace isn''t specified + in dataSourceRef, both fields (dataSource + and dataSourceRef) will be set to the + same value automatically if one of them + is empty and the other is non-empty. When + namespace is specified in dataSourceRef, + dataSource isn''t set to the same value + and must be empty. There are three important + differences between dataSource and dataSourceRef: + * While dataSource only allows two specific + types of objects, dataSourceRef allows + any non-core object, as well as PersistentVolumeClaim + objects. * While dataSource ignores disallowed + values (dropping them), dataSourceRef + preserves all values, and generates an + error if a disallowed value is specified. + * While dataSource only allows local objects, + dataSourceRef allows objects in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled. 
(Alpha) Using + the namespace field of dataSourceRef requires + the CrossNamespaceVolumeDataSource feature + gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for + the resource being referenced. If + APIGroup is not specified, the specified + Kind must be in the core API group. + For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: Namespace is the namespace + of resource being referenced Note + that when a namespace is specified, + a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent + namespace to allow that namespace's + owner to accept the reference. See + the ReferenceGrant documentation for + details. (Alpha) This field requires + the CrossNamespaceVolumeDataSource + feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum + resources the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to + specify resource requirements that are + lower than previous value but must still + be higher than capacity recorded in the + status field of the claim. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + claims: + description: "Claims lists the names + of resources, defined in spec.resourceClaims, + that are used by this container. \n + This is an alpha field and requires + enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. + It can only be set for containers." + items: + description: ResourceClaim references + one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the + name of one entry in pod.spec.resourceClaims + of the Pod where this field + is used. It makes that resource + available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum + amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the + minimum amount of compute resources + required. If Requests is omitted for + a container, it defaults to Limits + if that is explicitly specified, otherwise + to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. 
+ items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: 'storageClassName is the name + of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type + of volume is required by the claim. Value + of Filesystem is implied when not included + in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and then + exposed to the pod. + properties: + fsType: + description: 'fsType is the filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs", "ntfs". Implicitly + inferred to be "ext4" if unspecified. TODO: how + do we prevent errors in the filesystem from compromising + the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to + false (read/write). ReadOnly here will force the + ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world wide + identifiers (wwids) Either wwids or combination + of targetWWNs and lun must be set, but not both + simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume + resource that is provisioned/attached using an exec + based plugin. + properties: + driver: + description: driver is the name of the driver to + use for this volume. + type: string + fsType: + description: fsType is the filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs", "ntfs". The + default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' 
+ type: object + readOnly: + description: 'readOnly is Optional: defaults to + false (read/write). ReadOnly here will force the + ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'secretRef is Optional: secretRef is + reference to the secret object containing sensitive + information to pass to the plugin scripts. This + may be empty if no secret object is specified. + If the secret object contains more than one secret, + all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. This depends on the Flocker + control service being running + properties: + datasetName: + description: datasetName is Name of the dataset + stored as metadata -> name on the dataset for + Flocker should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk + resource that is attached to a kubelet''s host machine + and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred + to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem + from compromising the machine' + type: string + partition: + description: 'partition is the partition in the + volume that you want to mount. If omitted, the + default is to mount by volume name. Examples: + For volume /dev/sda1, you specify the partition + as "1". Similarly, the volume partition for /dev/sda + is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD resource + in GCE. Used to identify the disk in GCE. More + info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly + setting in VolumeMounts. Defaults to false. More + info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository at + a particular revision. DEPRECATED: GitRepo is deprecated. + To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo + using git, then mount the EmptyDir into the Pod''s + container.' + properties: + directory: + description: directory is the target directory name. + Must not contain or start with '..'. If '.' is + supplied, the volume directory will be the git + repository. Otherwise, if specified, the volume + will contain the git repository in the subdirectory + with the given name. 
+ type: string
+ repository:
+ description: repository is the URL
+ type: string
+ revision:
+ description: revision is the commit hash for the
+ specified revision.
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: 'glusterfs represents a Glusterfs mount
+ on the host that shares a pod''s lifetime. More info:
+ https://examples.k8s.io/volumes/glusterfs/README.md'
+ properties:
+ endpoints:
+ description: 'endpoints is the endpoint name that
+ details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod'
+ type: string
+ path:
+ description: 'path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod'
+ type: string
+ readOnly:
+ description: 'readOnly here will force the Glusterfs
+ volume to be mounted with read-only permissions.
+ Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod'
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: 'hostPath represents a pre-existing file
+ or directory on the host machine that is directly
+ exposed to the container. This is generally used for
+ system agents or other privileged things that are
+ allowed to see the host machine. Most containers will
+ NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ --- TODO(jonesdl) We need to restrict who can use
+ host directory mounts and who can/can not mount host
+ directories as read/write.'
+ properties:
+ path:
+ description: 'path of the directory on the host.
+ If the path is a symlink, it will follow the link
+ to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath'
+ type: string
+ type:
+ description: 'type for HostPath Volume Defaults
+ to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath'
+ type: string
+ required:
+ - path
+ type: object
+ iscsi:
+ description: 'iscsi represents an ISCSI Disk resource
+ that is attached to a kubelet''s host machine and
+ then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md'
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether support
+ iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether support
+ iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: 'fsType is the filesystem type of the
+ volume that you want to mount. Tip: Ensure that
+ the filesystem type is supported by the host operating
+ system. Examples: "ext4", "xfs", "ntfs". Implicitly
+ inferred to be "ext4" if unspecified. More info:
+ https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ TODO: how do we prevent errors in the filesystem
+ from compromising the machine'
+ type: string
+ initiatorName:
+ description: initiatorName is the custom iSCSI Initiator
+ Name. If initiatorName is specified with iscsiInterface
+ simultaneously, new iSCSI interface <target portal>:<volume name>
+ will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ description: iscsiInterface is the interface Name
+ that uses an iSCSI transport. Defaults to 'default'
+ (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: portals is the iSCSI Target Portal
+ List.
The portal is either an IP or ip_addr:port + if the port is other than default (typically TCP + ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly + setting in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: targetPortal is iSCSI Target Portal. + The Portal is either an IP or ip_addr:port if + the port is other than default (typically TCP + ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'name of the volume. Must be a DNS_LABEL + and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'nfs represents an NFS mount on the host + that shares a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export + to be mounted with read-only permissions. Defaults + to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address + of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents + a reference to a PersistentVolumeClaim in the same + namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting + in VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: fsType is the filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs", "ntfs". Implicitly + inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem type + to mount Must be a filesystem type supported by + the host operating system. Ex. "ext4", "xfs". + Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). 
+ ReadOnly here will force the ReadOnly setting + in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used + to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires decimal + values for mode bits. Directories within the path + are not affected by this setting. This might be + in conflict with other options that affect the + file mode, like fsGroup, and the result can be + other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected + along with other supported volume types + properties: + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: items if unspecified, each + key-value pair in the Data field of + the referenced ConfigMap will be projected + into the volume as a file whose name + is the key and content is the value. + If specified, the listed keys will be + projected into the specified paths, + and unlisted keys will not be present. + If a key is specified which is not present + in the ConfigMap, the volume setup will + error unless it is marked optional. + Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: + mode bits used to set permissions + on this file. Must be an octal + value between 0000 and 0777 or + a decimal value between 0 and + 511. YAML accepts both octal and + decimal values, JSON requires + decimal values for mode bits. + If not specified, the volume defaultMode + will be used. This might be in + conflict with other options that + affect the file mode, like fsGroup, + and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: path is the relative + path of the file to map the key + to. May not be an absolute path. + May not contain the path element + '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". 
+ type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: 'Optional: mode bits + used to set permissions on this + file, must be an octal value between + 0000 and 0777 or a decimal value + between 0 and 511. YAML accepts + both octal and decimal values, + JSON requires decimal values for + mode bits. If not specified, the + volume defaultMode will be used. + This might be in conflict with + other options that affect the + file mode, like fsGroup, and the + result can be other mode bits + set.' + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource + of the container: only resources + limits and requests (limits.cpu, + limits.memory, requests.cpu and + requests.memory) are currently + supported.' + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: items if unspecified, each + key-value pair in the Data field of + the referenced Secret will be projected + into the volume as a file whose name + is the key and content is the value. + If specified, the listed keys will be + projected into the specified paths, + and unlisted keys will not be present. + If a key is specified which is not present + in the Secret, the volume setup will + error unless it is marked optional. + Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: + mode bits used to set permissions + on this file. Must be an octal + value between 0000 and 0777 or + a decimal value between 0 and + 511. YAML accepts both octal and + decimal values, JSON requires + decimal values for mode bits. + If not specified, the volume defaultMode + will be used. This might be in + conflict with other options that + affect the file mode, like fsGroup, + and the result can be other mode + bits set.' + format: int32 + type: integer + path: + description: path is the relative + path of the file to map the key + to. May not be an absolute path. + May not contain the path element + '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: audience is the intended + audience of the token. A recipient of + a token must identify itself with an + identifier specified in the audience + of the token, and otherwise should reject + the token. The audience defaults to + the identifier of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the + requested duration of validity of the + service account token. As the token + approaches expiration, the kubelet volume + plugin will proactively rotate the service + account token. The kubelet will start + trying to rotate the token if the token + is older than 80 percent of its time + to live or if the token is older than + 24 hours.Defaults to 1 hour and must + be at least 10 minutes. + format: int64 + type: integer + path: + description: path is the path relative + to the mount point of the file to project + the token into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the + host that shares a pod's lifetime + properties: + group: + description: group to map volume access to Default + is no group + type: string + readOnly: + description: readOnly here will force the Quobyte + volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: registry represents a single or multiple + Quobyte Registry services specified as a string + as host:port pair (multiple entries are separated + with commas) which acts as the central registry + for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume + in the Backend Used with dynamically provisioned + Quobyte volumes, value is set by the plugin + type: string + user: + description: user to map volume access to Defaults + to serivceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device mount + on the host that shares a pod''s lifetime. More info: + https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of the + volume that you want to mount. Tip: Ensure that + the filesystem type is supported by the host operating + system. Examples: "ext4", "xfs", "ntfs". Implicitly + inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem + from compromising the machine' + type: string + image: + description: 'image is the rados image name. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring for + RBDUser. Default is /etc/ceph/keyring. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. 
Default + is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly + setting in VolumeMounts. Defaults to false. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication + secret for RBDUser. If provided overrides keyring. + Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: 'user is the rados user name. Default + is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent + volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs", "ntfs". Default + is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting + in VolumeMounts. + type: boolean + secretRef: + description: secretRef references to the secret + for ScaleIO user and other sensitive information. + If this is not provided, Login operation will + fail. + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage + for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: volumeName is the name of a volume + already created in the ScaleIO system that is + associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should + populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits + used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories + within the path are not affected by this setting. 
+ This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items If unspecified, each key-value + pair in the Data field of the referenced Secret + will be projected into the volume as a file whose + name is the key and content is the value. If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. + If a key is specified which is not present in + the Secret, the volume setup will error unless + it is marked optional. Paths must be relative + and may not contain the '..' path or start with + '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. Must + be an octal value between 0000 and 0777 + or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON + requires decimal values for mode bits. If + not specified, the volume defaultMode will + be used. This might be in conflict with + other options that affect the file mode, + like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be an + absolute path. May not contain the path + element '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret + in the pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs", "ntfs". Implicitly + inferred to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). + ReadOnly here will force the ReadOnly setting + in VolumeMounts. + type: boolean + secretRef: + description: secretRef specifies the secret to use + for obtaining the StorageOS API credentials. If + not specified, default values will be attempted. + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: volumeName is the human-readable name + of the StorageOS volume. Volume names are only + unique within a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope + of the volume within StorageOS. If no namespace + is specified then the Pod's namespace will be + used. This allows the Kubernetes name scoping + to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default + behaviour. Set to "default" if you are not using + namespaces within StorageOS. Namespaces that do + not pre-exist within StorageOS will be created. 
+ type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume + attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs", "ntfs". Implicitly + inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - containers + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + revisionHistoryLimit: + description: RevisionHistoryLimit is the number of revisions to retain + to allow rollback in the underlying StatefulSet. + format: int32 + type: integer + secureSettings: + description: SecureSettings is a list of references to Kubernetes + Secrets containing sensitive configuration options for the Logstash. + Secrets data can be then referenced in the Logstash config using + the Secret's keys or as specified in `Entries` field of each SecureSetting. + items: + description: SecretSource defines a data source based on a Kubernetes + Secret. + properties: + entries: + description: Entries define how to project each key-value pair + in the secret to filesystem paths. If not defined, all keys + will be projected to similarly named paths in the filesystem. + If defined, only the specified keys will be projected to the + corresponding paths. + items: + description: KeyToPath defines how to map a key in a Secret + object to a filesystem path. + properties: + key: + description: Key is the key contained in the secret. + type: string + path: + description: Path is the relative file path to map the + key to. Path must not be an absolute file path and must + not contain any ".." components. + type: string + required: + - key + type: object + type: array + secretName: + description: SecretName is the name of the secret. + type: string + required: + - secretName + type: object + type: array + serviceAccountName: + description: ServiceAccountName is used to check access from the current + resource to Elasticsearch resource in a different namespace. Can + only be used if ECK is enforcing RBAC on references. + type: string + services: + description: 'Services contains details of services that Logstash + should expose - similar to the HTTP layer configuration for the + rest of the stack, but also applicable for more use cases than the + metrics API, as logstash may need to be opened up for other services: + Beats, TCP, UDP, etc, inputs.' + items: + properties: + name: + type: string + service: + description: Service defines the template for the associated + Kubernetes Service object. + properties: + metadata: + description: ObjectMeta is the metadata of the service. + The name and namespace provided here are managed by ECK + and will be ignored. 
+ properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: Spec is the specification of the service. + properties: + allocateLoadBalancerNodePorts: + description: allocateLoadBalancerNodePorts defines if + NodePorts will be automatically allocated for services + with type LoadBalancer. Default is "true". It may + be set to "false" if the cluster load-balancer does + not rely on NodePorts. If the caller requests specific + NodePorts (by specifying a value), those requests + will be respected, regardless of this field. This + field may only be set for services with type LoadBalancer + and will be cleared if the type is changed to any + other type. + type: boolean + clusterIP: + description: 'clusterIP is the IP address of the service + and is usually assigned randomly. If an address is + specified manually, is in-range (as per system configuration), + and is not in use, it will be allocated to the service; + otherwise creation of the service will fail. This + field may not be changed through updates unless the + type field is also being changed to ExternalName (which + requires this field to be blank) or the type field + is being changed from ExternalName (in which case + this field may optionally be specified, as describe + above). Valid values are "None", empty string (""), + or a valid IP address. Setting this to "None" makes + a "headless service" (no virtual IP), which is useful + when direct endpoint connections are preferred and + proxying is not required. Only applies to types ClusterIP, + NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation + will fail. This field will be wiped when updating + a Service to type ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + clusterIPs: + description: "ClusterIPs is a list of IP addresses assigned + to this service, and are usually assigned randomly. + \ If an address is specified manually, is in-range + (as per system configuration), and is not in use, + it will be allocated to the service; otherwise creation + of the service will fail. This field may not be changed + through updates unless the type field is also being + changed to ExternalName (which requires this field + to be empty) or the type field is being changed from + ExternalName (in which case this field may optionally + be specified, as describe above). Valid values are + \"None\", empty string (\"\"), or a valid IP address. + \ Setting this to \"None\" makes a \"headless service\" + (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. + \ Only applies to types ClusterIP, NodePort, and LoadBalancer. + If this field is specified when creating a Service + of type ExternalName, creation will fail. This field + will be wiped when updating a Service to type ExternalName. + \ If this field is not specified, it will be initialized + from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP + have the same value. \n This field may hold a maximum + of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies + field. 
Both clusterIPs and ipFamilies are governed + by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies" + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: externalIPs is a list of IP addresses for + which nodes in the cluster will also accept traffic + for this service. These IPs are not managed by Kubernetes. The + user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external + load-balancers that are not part of the Kubernetes + system. + items: + type: string + type: array + externalName: + description: externalName is the external reference + that discovery mechanisms will return as an alias + for this service (e.g. a DNS CNAME record). No proxying + will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires + `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: externalTrafficPolicy describes how nodes + distribute service traffic they receive on one of + the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", + the proxy will configure the service in a way that + assumes that external load balancers will take care + of balancing the service traffic between nodes, and + so each node will deliver traffic only to the node-local + endpoints of the service, without masquerading the + client source IP. (Traffic mistakenly sent to a node + with no endpoints will be dropped.) The default value, + "Cluster", uses the standard behavior of routing to + all endpoints evenly (possibly modified by topology + and other features). Note that traffic sent to an + External IP or LoadBalancer IP from within the cluster + will always get "Cluster" semantics, but clients sending + to a NodePort from within the cluster may need to + take traffic policy into account when picking a node. + type: string + healthCheckNodePort: + description: healthCheckNodePort specifies the healthcheck + nodePort for the service. This only applies when type + is set to LoadBalancer and externalTrafficPolicy is + set to Local. If a value is specified, is in-range, + and is not in use, it will be used. If not specified, + a value will be automatically allocated. External + systems (e.g. load-balancers) can use this port to + determine if a given node holds endpoints for this + service or not. If this field is specified when creating + a Service which does not need it, creation will fail. + This field will be wiped when updating a Service to + no longer need it (e.g. changing type). This field + cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: InternalTrafficPolicy describes how nodes + distribute service traffic they receive on the ClusterIP. + If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the + same node as the pod, dropping the traffic if there + are no local endpoints. The default value, "Cluster", + uses the standard behavior of routing to all endpoints + evenly (possibly modified by topology and other features). + type: string + ipFamilies: + description: "IPFamilies is a list of IP families (e.g. + IPv4, IPv6) assigned to this service. This field is + usually assigned automatically based on cluster configuration + and the ipFamilyPolicy field. 
If this field is specified + manually, the requested family is available in the + cluster, and ipFamilyPolicy allows it, it will be + used; otherwise creation of the service will fail. + This field is conditionally mutable: it allows for + adding or removing a secondary IP family, but it does + not allow changing the primary IP family of the Service. + Valid values are \"IPv4\" and \"IPv6\". This field + only applies to Services of types ClusterIP, NodePort, + and LoadBalancer, and does apply to \"headless\" services. + This field will be wiped when updating a Service to + type ExternalName. \n This field may hold a maximum + of two entries (dual-stack families, in either order). + \ These families must correspond to the values of + the clusterIPs field, if specified. Both clusterIPs + and ipFamilies are governed by the ipFamilyPolicy + field." + items: + description: IPFamily represents the IP Family (IPv4 + or IPv6). This type is used to express the family + of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: IPFamilyPolicy represents the dual-stack-ness + requested or required by this Service. If there is + no value provided, then this field will be set to + SingleStack. Services can be "SingleStack" (a single + IP family), "PreferDualStack" (two IP families on + dual-stack configured clusters or a single IP family + on single-stack clusters), or "RequireDualStack" (two + IP families on dual-stack configured clusters, otherwise + fail). The ipFamilies and clusterIPs fields depend + on the value of this field. This field will be wiped + when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: loadBalancerClass is the class of the load + balancer implementation this Service belongs to. If + specified, the value of this field must be a label-style + identifier, with an optional prefix, e.g. "internal-vip" + or "example.com/internal-vip". Unprefixed names are + reserved for end-users. This field can only be set + when the Service type is 'LoadBalancer'. If not set, + the default load balancer implementation is used, + today this is typically done through the cloud provider + integration, but should apply for any default implementation. + If set, it is assumed that a load balancer implementation + is watching for Services with a matching class. Any + default load balancer implementation (e.g. cloud providers) + should ignore Services that set this field. This field + can only be set when creating or updating a Service + to type 'LoadBalancer'. Once set, it can not be changed. + This field will be wiped when a service is updated + to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: 'Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider + supports specifying the loadBalancerIP when a load + balancer is created. This field will be ignored if + the cloud-provider does not support the feature. Deprecated: + This field was under-specified and its meaning varies + across implementations, and it cannot support dual-stack. + As of Kubernetes v1.24, users are encouraged to use + implementation-specific annotations when available. + This field may be removed in a future API version.' 
+ type: string + loadBalancerSourceRanges: + description: 'If specified and supported by the platform, + this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified + client IPs. This field will be ignored if the cloud-provider + does not support the feature." More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/' + items: + type: string + type: array + ports: + description: 'The list of ports that are exposed by + this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + items: + description: ServicePort contains information on service's + port. + properties: + appProtocol: + description: The application protocol for this + port. This field follows standard Kubernetes + label syntax. Un-prefixed names are reserved + for IANA standard service names (as per RFC-6335 + and https://www.iana.org/assignments/service-names). + Non-standard protocols should use prefixed names + such as mycompany.com/my-custom-protocol. + type: string + name: + description: The name of this port within the + service. This must be a DNS_LABEL. All ports + within a ServiceSpec must have unique names. + When considering the endpoints for a Service, + this must match the 'name' field in the EndpointPort. + Optional if only one ServicePort is defined + on this service. + type: string + nodePort: + description: 'The port on each node on which this + service is exposed when type is NodePort or + LoadBalancer. Usually assigned by the system. + If a value is specified, in-range, and not in + use it will be used, otherwise the operation + will fail. If not specified, a port will be + allocated if this Service requires one. If + this field is specified when creating a Service + which does not need it, creation will fail. + This field will be wiped when updating a Service + to no longer need it (e.g. changing type from + NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport' + format: int32 + type: integer + port: + description: The port that will be exposed by + this service. + format: int32 + type: integer + protocol: + default: TCP + description: The IP protocol for this port. Supports + "TCP", "UDP", and "SCTP". Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: 'Number or name of the port to access + on the pods targeted by the service. Number + must be in the range 1 to 65535. Name must be + an IANA_SVC_NAME. If this is a string, it will + be looked up as a named port in the target Pod''s + container ports. If this is not specified, the + value of the ''port'' field is used (an identity + map). This field is ignored for services with + clusterIP=None, and should be omitted or set + equal to the ''port'' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service' + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: publishNotReadyAddresses indicates that + any agent which deals with endpoints for this Service + should disregard any indications of ready/not-ready. + The primary use case for setting this field is for + a StatefulSet's Headless Service to propagate SRV + DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints + and EndpointSlice resources for Services interpret + this to mean that all endpoints are considered "ready" + even if the Pods themselves are not. Agents which + consume only Kubernetes generated endpoints through + the Endpoints or EndpointSlice resources can safely + assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: 'Route service traffic to pods with label + keys and values matching this selector. If empty or + not present, the service is assumed to have an external + process managing its endpoints, which Kubernetes will + not modify. Only applies to types ClusterIP, NodePort, + and LoadBalancer. Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/' + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: 'Supports "ClientIP" and "None". Used to + maintain session affinity. Enable client IP based + session affinity. Must be ClientIP or None. Defaults + to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: timeoutSeconds specifies the seconds + of ClientIP type session sticky time. The + value must be >0 && <=86400(for 1 day) if + ServiceAffinity == "ClientIP". Default value + is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + type: + description: 'type determines how the Service is exposed. + Defaults to ClusterIP. Valid options are ExternalName, + ClusterIP, NodePort, and LoadBalancer. "ClusterIP" + allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector + or if that is not specified, by manual construction + of an Endpoints object or EndpointSlice objects. If + clusterIP is "None", no virtual IP is allocated and + the endpoints are published as a set of endpoints + rather than a virtual IP. "NodePort" builds on ClusterIP + and allocates a port on every node which routes to + the same endpoints as the clusterIP. "LoadBalancer" + builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to + the same endpoints as the clusterIP. "ExternalName" + aliases this service to the specified externalName. + Several other fields do not apply to ExternalName + services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types' + type: string + type: object + type: object + tls: + description: TLS defines options for configuring TLS for HTTP. + properties: + certificate: + description: "Certificate is a reference to a Kubernetes + secret that contains the certificate and private key for + enabling TLS. The referenced secret should contain the + following: \n - `ca.crt`: The certificate authority (optional). + - `tls.crt`: The certificate (or a chain). - `tls.key`: + The private key to the first certificate in the certificate + chain." + properties: + secretName: + description: SecretName is the name of the secret. 
+ type: string + type: object + selfSignedCertificate: + description: SelfSignedCertificate allows configuring the + self-signed certificate generated by the operator. + properties: + disabled: + description: Disabled indicates that the provisioning + of the self-signed certifcate should be disabled. + type: boolean + subjectAltNames: + description: SubjectAlternativeNames is a list of SANs + to include in the generated HTTP TLS certificate. + items: + description: SubjectAlternativeName represents a SAN + entry in a x509 certificate. + properties: + dns: + description: DNS is the DNS name of the subject. + type: string + ip: + description: IP is the IP address of the subject. + type: string + type: object + type: array + type: object + type: object + type: object + type: array + version: + description: Version of the Logstash. + type: string + required: + - version + type: object + status: + description: LogstashStatus defines the observed state of Logstash + properties: + availableNodes: + format: int32 + type: integer + elasticsearchAssociationsStatus: + additionalProperties: + description: AssociationStatus is the status of an association resource. + type: string + description: ElasticsearchAssociationStatus is the status of any auto-linking + to Elasticsearch clusters. + type: object + expectedNodes: + format: int32 + type: integer + monitoringAssociationStatus: + additionalProperties: + description: AssociationStatus is the status of an association resource. + type: string + description: MonitoringAssociationStatus is the status of any auto-linking + to monitoring Elasticsearch clusters. + type: object + observedGeneration: + description: ObservedGeneration is the most recent generation observed + for this Logstash instance. It corresponds to the metadata generation, + which is updated on mutation by the API Server. If the generation + observed in status diverges from the generation in metadata, the + Logstash controller has not yet processed the changes contained + in the Logstash specification. + format: int64 + type: integer + version: + description: 'Version of the stack resource currently running. During + version upgrades, multiple versions may run in parallel: this value + specifies the lowest version currently running.' + type: string + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.count + statusReplicasPath: .status.count + status: {} diff --git a/config/crds/v1/patches/kustomization.yaml b/config/crds/v1/patches/kustomization.yaml index f8dfffb0f8..3769976ec2 100644 --- a/config/crds/v1/patches/kustomization.yaml +++ b/config/crds/v1/patches/kustomization.yaml @@ -70,4 +70,12 @@ patchesJson6902: kind: CustomResourceDefinition name: elasticmapsservers.maps.k8s.elastic.co path: maps-patches.yaml + # custom patches for Logstash + - target: + group: apiextensions.k8s.io + version: v1 + kind: CustomResourceDefinition + name: logstashes.logstash.k8s.elastic.co + path: logstash-patches.yaml + diff --git a/config/crds/v1/patches/logstash-patches.yaml b/config/crds/v1/patches/logstash-patches.yaml new file mode 100644 index 0000000000..ce8f164770 --- /dev/null +++ b/config/crds/v1/patches/logstash-patches.yaml @@ -0,0 +1,7 @@ +# Using `kubectl apply` stores the complete CRD file as an annotation, +# which may be too big for the annotations size limit. +# One way to mitigate this problem is to remove the (huge) podTemplate properties from the CRD. 
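+# (For context: the Kubernetes API server caps the total size of metadata.annotations
+# at 256KiB, and `kubectl apply` stores the complete last-applied configuration in such
+# an annotation; trimming the generated podTemplate schema helps keep the CRD under that cap.)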
+# It also avoids the problem of having any k8s-version specific field in the Pod schema, +# that would maybe not match the user's k8s version. +- op: remove + path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/podTemplate/properties diff --git a/config/e2e/rbac.yaml b/config/e2e/rbac.yaml index 15c3271d9b..57e393ea43 100644 --- a/config/e2e/rbac.yaml +++ b/config/e2e/rbac.yaml @@ -316,6 +316,19 @@ rules: - update - patch - delete + - apiGroups : + - logstash.k8s.elastic.co + resources: + - logstashes + - logstashes/status + verbs: + - get + - list + - watch + - create + - update + - patch + - delete - apiGroups: - storage.k8s.io resources: diff --git a/config/samples/logstash/logstash.yaml b/config/samples/logstash/logstash.yaml new file mode 100644 index 0000000000..fa49c576de --- /dev/null +++ b/config/samples/logstash/logstash.yaml @@ -0,0 +1,18 @@ +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: logstash-sample +spec: + count: 3 + version: 8.6.1 + config: + log.level: info + api.http.host: "0.0.0.0" + queue.type: memory + pipelines: + - pipeline.id: main + config.string: input { exec { command => 'uptime' interval => 10 } } output { stdout{} } + podTemplate: + spec: + containers: + - name: logstash \ No newline at end of file diff --git a/config/samples/logstash/logstash_es.yaml b/config/samples/logstash/logstash_es.yaml new file mode 100644 index 0000000000..32d0cd834e --- /dev/null +++ b/config/samples/logstash/logstash_es.yaml @@ -0,0 +1,36 @@ +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: elasticsearch-sample +spec: + version: 8.7.0 + nodeSets: + - name: default + count: 2 + config: + node.store.allow_mmap: false +--- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: logstash-sample +spec: + count: 1 + version: 8.7.0 + elasticsearchRefs: + - clusterName: production + name: elasticsearch-sample + pipelines: + - pipeline.id: main + config.string: | + input { exec { command => 'uptime' interval => 10 } } + output { + elasticsearch { + hosts => [ "${PRODUCTION_ES_HOSTS}" ] + ssl => true + cacert => "${PRODUCTION_ES_SSL_CERTIFICATE_AUTHORITY}" + user => "${PRODUCTION_ES_USER}" + password => "${PRODUCTION_ES_PASSWORD}" + } + } +--- \ No newline at end of file diff --git a/config/samples/logstash/logstash_stackmonitor.yaml b/config/samples/logstash/logstash_stackmonitor.yaml new file mode 100644 index 0000000000..5194e52be2 --- /dev/null +++ b/config/samples/logstash/logstash_stackmonitor.yaml @@ -0,0 +1,46 @@ +--- +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: monitoring +spec: + version: 8.6.1 + nodeSets: + - name: default + count: 1 + config: + node.store.allow_mmap: false +--- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: logstash-sample +spec: + count: 1 + version: 8.7.0 + config: + log.level: info + api.http.host: "0.0.0.0" + queue.type: memory + podTemplate: + spec: + containers: + - name: logstash + monitoring: + metrics: + elasticsearchRefs: + - name: monitoring + logs: + elasticsearchRefs: + - name: monitoring +--- +apiVersion: kibana.k8s.elastic.co/v1 +kind: Kibana +metadata: + name: kibana-sample +spec: + version: 8.6.1 + elasticsearchRef: + name: monitoring + count: 1 +--- \ No newline at end of file diff --git a/config/samples/logstash/logstash_svc.yaml b/config/samples/logstash/logstash_svc.yaml new file mode 100644 index 0000000000..1ede140468 --- /dev/null +++ 
b/config/samples/logstash/logstash_svc.yaml @@ -0,0 +1,51 @@ +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: elasticsearch-sample +spec: + version: 8.6.1 + nodeSets: + - name: default + count: 3 + config: + node.store.allow_mmap: false +--- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: logstash-sample +spec: + count: 2 + version: 8.6.1 + config: + log.level: info + api.http.host: "0.0.0.0" + api.http.port: 9601 + queue.type: memory + pipelines: + - pipeline.id: main + pipeline.workers: 2 + config.string: "input { beats { port => 5044 }} output { stdout {}}" + services: + - name: api + service: + spec: + type: ClusterIP + ports: + - port: 9601 + name: "api" + protocol: TCP + targetPort: 9601 + - name: beats + service: + spec: + type: ClusterIP + ports: + - port: 5044 + name: "filebeat" + protocol: TCP + targetPort: 5044 + - port: 5045 + name: "winlogbeat" + protocol: TCP + targetPort: 5045 diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index d6553db597..b686e2fae2 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -202,6 +202,28 @@ webhooks: resources: - kibanas sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-logstash-k8s-elastic-co-v1alpha1-logstash + failurePolicy: Ignore + matchPolicy: Exact + name: elastic-logstash-validation-v1alpha1.k8s.elastic.co + rules: + - apiGroups: + - logstash.k8s.elastic.co + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - logstashes + sideEffects: None - admissionReviewVersions: - v1 - v1beta1 diff --git a/deploy/eck-operator/charts/eck-operator-crds/templates/all-crds.yaml b/deploy/eck-operator/charts/eck-operator-crds/templates/all-crds.yaml index 499ba09063..1a5886c577 100644 --- a/deploy/eck-operator/charts/eck-operator-crds/templates/all-crds.yaml +++ b/deploy/eck-operator/charts/eck-operator-crds/templates/all-crds.yaml @@ -9087,6 +9087,797 @@ spec: --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.4 + labels: + app.kubernetes.io/instance: '{{ .Release.Name }}' + app.kubernetes.io/managed-by: '{{ .Release.Service }}' + app.kubernetes.io/name: '{{ include "eck-operator-crds.name" . }}' + app.kubernetes.io/version: '{{ .Chart.AppVersion }}' + helm.sh/chart: '{{ include "eck-operator-crds.chart" . }}' + name: logstashes.logstash.k8s.elastic.co +spec: + group: logstash.k8s.elastic.co + names: + categories: + - elastic + kind: Logstash + listKind: LogstashList + plural: logstashes + shortNames: + - ls + singular: logstash + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Available nodes + jsonPath: .status.availableNodes + name: available + type: integer + - description: Expected nodes + jsonPath: .status.expectedNodes + name: expected + type: integer + - jsonPath: .metadata.creationTimestamp + name: age + type: date + - description: Logstash version + jsonPath: .status.version + name: version + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Logstash is the Schema for the logstashes API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: LogstashSpec defines the desired state of Logstash + properties: + config: + description: Config holds the Logstash configuration. At most one + of [`Config`, `ConfigRef`] can be specified. + type: object + x-kubernetes-preserve-unknown-fields: true + configRef: + description: ConfigRef contains a reference to an existing Kubernetes + Secret holding the Logstash configuration. Logstash settings must + be specified as yaml, under a single "logstash.yml" entry. At most + one of [`Config`, `ConfigRef`] can be specified. + properties: + secretName: + description: SecretName is the name of the secret. + type: string + type: object + count: + format: int32 + type: integer + elasticsearchRefs: + description: ElasticsearchRefs are references to Elasticsearch clusters + running in the same Kubernetes cluster. + items: + description: ElasticsearchCluster is a named reference to an Elasticsearch + cluster which can be used in a Logstash pipeline. + properties: + clusterName: + description: ClusterName is an alias for the cluster to be used + to refer to the Elasticsearch cluster in Logstash configuration + files, and will be used to identify "named clusters" in Logstash + minLength: 1 + type: string + name: + description: Name of an existing Kubernetes object corresponding + to an Elastic resource managed by ECK. + type: string + namespace: + description: Namespace of the Kubernetes object. If empty, defaults + to the current namespace. + type: string + secretName: + description: 'SecretName is the name of an existing Kubernetes + secret that contains connection information for associating + an Elastic resource not managed by the operator. The referenced + secret must contain the following: - `url`: the URL to reach + the Elastic resource - `username`: the username of the user + to be authenticated to the Elastic resource - `password`: + the password of the user to be authenticated to the Elastic + resource - `ca.crt`: the CA certificate in PEM format (optional). + This field cannot be used in combination with the other fields + name, namespace or serviceName.' + type: string + serviceName: + description: ServiceName is the name of an existing Kubernetes + service which is used to make requests to the referenced object. + It has to be in the same namespace as the referenced resource. + If left empty, the default HTTP service of the referenced + resource is used. + type: string + type: object + type: array + image: + description: Image is the Logstash Docker image to deploy. Version + and Type have to match the Logstash in the image. + type: string + monitoring: + description: Monitoring enables you to collect and ship log and monitoring + data of this Logstash. Metricbeat and Filebeat are deployed in the + same Pod as sidecars and each one sends data to one or two different + Elasticsearch monitoring clusters running in the same Kubernetes + cluster. + properties: + logs: + description: Logs holds references to Elasticsearch clusters which + receive log data from an associated resource. 
+ properties: + elasticsearchRefs: + description: ElasticsearchRefs is a reference to a list of + monitoring Elasticsearch clusters running in the same Kubernetes + cluster. Due to existing limitations, only a single Elasticsearch + cluster is currently supported. + items: + description: ObjectSelector defines a reference to a Kubernetes + object which can be an Elastic resource managed by the + operator or a Secret describing an external Elastic resource + not managed by the operator. + properties: + name: + description: Name of an existing Kubernetes object corresponding + to an Elastic resource managed by ECK. + type: string + namespace: + description: Namespace of the Kubernetes object. If + empty, defaults to the current namespace. + type: string + secretName: + description: 'SecretName is the name of an existing + Kubernetes secret that contains connection information + for associating an Elastic resource not managed by + the operator. The referenced secret must contain the + following: - `url`: the URL to reach the Elastic resource + - `username`: the username of the user to be authenticated + to the Elastic resource - `password`: the password + of the user to be authenticated to the Elastic resource + - `ca.crt`: the CA certificate in PEM format (optional). + This field cannot be used in combination with the + other fields name, namespace or serviceName.' + type: string + serviceName: + description: ServiceName is the name of an existing + Kubernetes service which is used to make requests + to the referenced object. It has to be in the same + namespace as the referenced resource. If left empty, + the default HTTP service of the referenced resource + is used. + type: string + type: object + type: array + type: object + metrics: + description: Metrics holds references to Elasticsearch clusters + which receive monitoring data from this resource. + properties: + elasticsearchRefs: + description: ElasticsearchRefs is a reference to a list of + monitoring Elasticsearch clusters running in the same Kubernetes + cluster. Due to existing limitations, only a single Elasticsearch + cluster is currently supported. + items: + description: ObjectSelector defines a reference to a Kubernetes + object which can be an Elastic resource managed by the + operator or a Secret describing an external Elastic resource + not managed by the operator. + properties: + name: + description: Name of an existing Kubernetes object corresponding + to an Elastic resource managed by ECK. + type: string + namespace: + description: Namespace of the Kubernetes object. If + empty, defaults to the current namespace. + type: string + secretName: + description: 'SecretName is the name of an existing + Kubernetes secret that contains connection information + for associating an Elastic resource not managed by + the operator. The referenced secret must contain the + following: - `url`: the URL to reach the Elastic resource + - `username`: the username of the user to be authenticated + to the Elastic resource - `password`: the password + of the user to be authenticated to the Elastic resource + - `ca.crt`: the CA certificate in PEM format (optional). + This field cannot be used in combination with the + other fields name, namespace or serviceName.' + type: string + serviceName: + description: ServiceName is the name of an existing + Kubernetes service which is used to make requests + to the referenced object. It has to be in the same + namespace as the referenced resource. 
If left empty, + the default HTTP service of the referenced resource + is used. + type: string + type: object + type: array + type: object + type: object + pipelines: + description: Pipelines holds the Logstash Pipelines. At most one of + [`Pipelines`, `PipelinesRef`] can be specified. + items: + type: object + type: array + x-kubernetes-preserve-unknown-fields: true + pipelinesRef: + description: PipelinesRef contains a reference to an existing Kubernetes + Secret holding the Logstash Pipelines. Logstash pipelines must be + specified as yaml, under a single "pipelines.yml" entry. At most + one of [`Pipelines`, `PipelinesRef`] can be specified. + properties: + secretName: + description: SecretName is the name of the secret. + type: string + type: object + podTemplate: + description: PodTemplate provides customisation options for the Logstash + pods. + type: object + x-kubernetes-preserve-unknown-fields: true + revisionHistoryLimit: + description: RevisionHistoryLimit is the number of revisions to retain + to allow rollback in the underlying StatefulSet. + format: int32 + type: integer + secureSettings: + description: SecureSettings is a list of references to Kubernetes + Secrets containing sensitive configuration options for the Logstash. + Secrets data can be then referenced in the Logstash config using + the Secret's keys or as specified in `Entries` field of each SecureSetting. + items: + description: SecretSource defines a data source based on a Kubernetes + Secret. + properties: + entries: + description: Entries define how to project each key-value pair + in the secret to filesystem paths. If not defined, all keys + will be projected to similarly named paths in the filesystem. + If defined, only the specified keys will be projected to the + corresponding paths. + items: + description: KeyToPath defines how to map a key in a Secret + object to a filesystem path. + properties: + key: + description: Key is the key contained in the secret. + type: string + path: + description: Path is the relative file path to map the + key to. Path must not be an absolute file path and must + not contain any ".." components. + type: string + required: + - key + type: object + type: array + secretName: + description: SecretName is the name of the secret. + type: string + required: + - secretName + type: object + type: array + serviceAccountName: + description: ServiceAccountName is used to check access from the current + resource to Elasticsearch resource in a different namespace. Can + only be used if ECK is enforcing RBAC on references. + type: string + services: + description: 'Services contains details of services that Logstash + should expose - similar to the HTTP layer configuration for the + rest of the stack, but also applicable for more use cases than the + metrics API, as logstash may need to be opened up for other services: + Beats, TCP, UDP, etc, inputs.' + items: + properties: + name: + type: string + service: + description: Service defines the template for the associated + Kubernetes Service object. + properties: + metadata: + description: ObjectMeta is the metadata of the service. + The name and namespace provided here are managed by ECK + and will be ignored. + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: Spec is the specification of the service. 
+ properties: + allocateLoadBalancerNodePorts: + description: allocateLoadBalancerNodePorts defines if + NodePorts will be automatically allocated for services + with type LoadBalancer. Default is "true". It may + be set to "false" if the cluster load-balancer does + not rely on NodePorts. If the caller requests specific + NodePorts (by specifying a value), those requests + will be respected, regardless of this field. This + field may only be set for services with type LoadBalancer + and will be cleared if the type is changed to any + other type. + type: boolean + clusterIP: + description: 'clusterIP is the IP address of the service + and is usually assigned randomly. If an address is + specified manually, is in-range (as per system configuration), + and is not in use, it will be allocated to the service; + otherwise creation of the service will fail. This + field may not be changed through updates unless the + type field is also being changed to ExternalName (which + requires this field to be blank) or the type field + is being changed from ExternalName (in which case + this field may optionally be specified, as describe + above). Valid values are "None", empty string (""), + or a valid IP address. Setting this to "None" makes + a "headless service" (no virtual IP), which is useful + when direct endpoint connections are preferred and + proxying is not required. Only applies to types ClusterIP, + NodePort, and LoadBalancer. If this field is specified + when creating a Service of type ExternalName, creation + will fail. This field will be wiped when updating + a Service to type ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + clusterIPs: + description: "ClusterIPs is a list of IP addresses assigned + to this service, and are usually assigned randomly. + \ If an address is specified manually, is in-range + (as per system configuration), and is not in use, + it will be allocated to the service; otherwise creation + of the service will fail. This field may not be changed + through updates unless the type field is also being + changed to ExternalName (which requires this field + to be empty) or the type field is being changed from + ExternalName (in which case this field may optionally + be specified, as describe above). Valid values are + \"None\", empty string (\"\"), or a valid IP address. + \ Setting this to \"None\" makes a \"headless service\" + (no virtual IP), which is useful when direct endpoint + connections are preferred and proxying is not required. + \ Only applies to types ClusterIP, NodePort, and LoadBalancer. + If this field is specified when creating a Service + of type ExternalName, creation will fail. This field + will be wiped when updating a Service to type ExternalName. + \ If this field is not specified, it will be initialized + from the clusterIP field. If this field is specified, + clients must ensure that clusterIPs[0] and clusterIP + have the same value. \n This field may hold a maximum + of two entries (dual-stack IPs, in either order). + These IPs must correspond to the values of the ipFamilies + field. Both clusterIPs and ipFamilies are governed + by the ipFamilyPolicy field. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies" + items: + type: string + type: array + x-kubernetes-list-type: atomic + externalIPs: + description: externalIPs is a list of IP addresses for + which nodes in the cluster will also accept traffic + for this service. These IPs are not managed by Kubernetes. The + user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external + load-balancers that are not part of the Kubernetes + system. + items: + type: string + type: array + externalName: + description: externalName is the external reference + that discovery mechanisms will return as an alias + for this service (e.g. a DNS CNAME record). No proxying + will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires + `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: externalTrafficPolicy describes how nodes + distribute service traffic they receive on one of + the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", + the proxy will configure the service in a way that + assumes that external load balancers will take care + of balancing the service traffic between nodes, and + so each node will deliver traffic only to the node-local + endpoints of the service, without masquerading the + client source IP. (Traffic mistakenly sent to a node + with no endpoints will be dropped.) The default value, + "Cluster", uses the standard behavior of routing to + all endpoints evenly (possibly modified by topology + and other features). Note that traffic sent to an + External IP or LoadBalancer IP from within the cluster + will always get "Cluster" semantics, but clients sending + to a NodePort from within the cluster may need to + take traffic policy into account when picking a node. + type: string + healthCheckNodePort: + description: healthCheckNodePort specifies the healthcheck + nodePort for the service. This only applies when type + is set to LoadBalancer and externalTrafficPolicy is + set to Local. If a value is specified, is in-range, + and is not in use, it will be used. If not specified, + a value will be automatically allocated. External + systems (e.g. load-balancers) can use this port to + determine if a given node holds endpoints for this + service or not. If this field is specified when creating + a Service which does not need it, creation will fail. + This field will be wiped when updating a Service to + no longer need it (e.g. changing type). This field + cannot be updated once set. + format: int32 + type: integer + internalTrafficPolicy: + description: InternalTrafficPolicy describes how nodes + distribute service traffic they receive on the ClusterIP. + If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the + same node as the pod, dropping the traffic if there + are no local endpoints. The default value, "Cluster", + uses the standard behavior of routing to all endpoints + evenly (possibly modified by topology and other features). + type: string + ipFamilies: + description: "IPFamilies is a list of IP families (e.g. + IPv4, IPv6) assigned to this service. This field is + usually assigned automatically based on cluster configuration + and the ipFamilyPolicy field. 
If this field is specified + manually, the requested family is available in the + cluster, and ipFamilyPolicy allows it, it will be + used; otherwise creation of the service will fail. + This field is conditionally mutable: it allows for + adding or removing a secondary IP family, but it does + not allow changing the primary IP family of the Service. + Valid values are \"IPv4\" and \"IPv6\". This field + only applies to Services of types ClusterIP, NodePort, + and LoadBalancer, and does apply to \"headless\" services. + This field will be wiped when updating a Service to + type ExternalName. \n This field may hold a maximum + of two entries (dual-stack families, in either order). + \ These families must correspond to the values of + the clusterIPs field, if specified. Both clusterIPs + and ipFamilies are governed by the ipFamilyPolicy + field." + items: + description: IPFamily represents the IP Family (IPv4 + or IPv6). This type is used to express the family + of an IP expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + x-kubernetes-list-type: atomic + ipFamilyPolicy: + description: IPFamilyPolicy represents the dual-stack-ness + requested or required by this Service. If there is + no value provided, then this field will be set to + SingleStack. Services can be "SingleStack" (a single + IP family), "PreferDualStack" (two IP families on + dual-stack configured clusters or a single IP family + on single-stack clusters), or "RequireDualStack" (two + IP families on dual-stack configured clusters, otherwise + fail). The ipFamilies and clusterIPs fields depend + on the value of this field. This field will be wiped + when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: loadBalancerClass is the class of the load + balancer implementation this Service belongs to. If + specified, the value of this field must be a label-style + identifier, with an optional prefix, e.g. "internal-vip" + or "example.com/internal-vip". Unprefixed names are + reserved for end-users. This field can only be set + when the Service type is 'LoadBalancer'. If not set, + the default load balancer implementation is used, + today this is typically done through the cloud provider + integration, but should apply for any default implementation. + If set, it is assumed that a load balancer implementation + is watching for Services with a matching class. Any + default load balancer implementation (e.g. cloud providers) + should ignore Services that set this field. This field + can only be set when creating or updating a Service + to type 'LoadBalancer'. Once set, it can not be changed. + This field will be wiped when a service is updated + to a non 'LoadBalancer' type. + type: string + loadBalancerIP: + description: 'Only applies to Service Type: LoadBalancer. + This feature depends on whether the underlying cloud-provider + supports specifying the loadBalancerIP when a load + balancer is created. This field will be ignored if + the cloud-provider does not support the feature. Deprecated: + This field was under-specified and its meaning varies + across implementations, and it cannot support dual-stack. + As of Kubernetes v1.24, users are encouraged to use + implementation-specific annotations when available. + This field may be removed in a future API version.' 
+ type: string + loadBalancerSourceRanges: + description: 'If specified and supported by the platform, + this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified + client IPs. This field will be ignored if the cloud-provider + does not support the feature." More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/' + items: + type: string + type: array + ports: + description: 'The list of ports that are exposed by + this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + items: + description: ServicePort contains information on service's + port. + properties: + appProtocol: + description: The application protocol for this + port. This field follows standard Kubernetes + label syntax. Un-prefixed names are reserved + for IANA standard service names (as per RFC-6335 + and https://www.iana.org/assignments/service-names). + Non-standard protocols should use prefixed names + such as mycompany.com/my-custom-protocol. + type: string + name: + description: The name of this port within the + service. This must be a DNS_LABEL. All ports + within a ServiceSpec must have unique names. + When considering the endpoints for a Service, + this must match the 'name' field in the EndpointPort. + Optional if only one ServicePort is defined + on this service. + type: string + nodePort: + description: 'The port on each node on which this + service is exposed when type is NodePort or + LoadBalancer. Usually assigned by the system. + If a value is specified, in-range, and not in + use it will be used, otherwise the operation + will fail. If not specified, a port will be + allocated if this Service requires one. If + this field is specified when creating a Service + which does not need it, creation will fail. + This field will be wiped when updating a Service + to no longer need it (e.g. changing type from + NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport' + format: int32 + type: integer + port: + description: The port that will be exposed by + this service. + format: int32 + type: integer + protocol: + default: TCP + description: The IP protocol for this port. Supports + "TCP", "UDP", and "SCTP". Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: 'Number or name of the port to access + on the pods targeted by the service. Number + must be in the range 1 to 65535. Name must be + an IANA_SVC_NAME. If this is a string, it will + be looked up as a named port in the target Pod''s + container ports. If this is not specified, the + value of the ''port'' field is used (an identity + map). This field is ignored for services with + clusterIP=None, and should be omitted or set + equal to the ''port'' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service' + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: publishNotReadyAddresses indicates that + any agent which deals with endpoints for this Service + should disregard any indications of ready/not-ready. + The primary use case for setting this field is for + a StatefulSet's Headless Service to propagate SRV + DNS records for its Pods for the purpose of peer discovery. 
+ The Kubernetes controllers that generate Endpoints + and EndpointSlice resources for Services interpret + this to mean that all endpoints are considered "ready" + even if the Pods themselves are not. Agents which + consume only Kubernetes generated endpoints through + the Endpoints or EndpointSlice resources can safely + assume this behavior. + type: boolean + selector: + additionalProperties: + type: string + description: 'Route service traffic to pods with label + keys and values matching this selector. If empty or + not present, the service is assumed to have an external + process managing its endpoints, which Kubernetes will + not modify. Only applies to types ClusterIP, NodePort, + and LoadBalancer. Ignored if type is ExternalName. + More info: https://kubernetes.io/docs/concepts/services-networking/service/' + type: object + x-kubernetes-map-type: atomic + sessionAffinity: + description: 'Supports "ClientIP" and "None". Used to + maintain session affinity. Enable client IP based + session affinity. Must be ClientIP or None. Defaults + to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: timeoutSeconds specifies the seconds + of ClientIP type session sticky time. The + value must be >0 && <=86400(for 1 day) if + ServiceAffinity == "ClientIP". Default value + is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + type: + description: 'type determines how the Service is exposed. + Defaults to ClusterIP. Valid options are ExternalName, + ClusterIP, NodePort, and LoadBalancer. "ClusterIP" + allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector + or if that is not specified, by manual construction + of an Endpoints object or EndpointSlice objects. If + clusterIP is "None", no virtual IP is allocated and + the endpoints are published as a set of endpoints + rather than a virtual IP. "NodePort" builds on ClusterIP + and allocates a port on every node which routes to + the same endpoints as the clusterIP. "LoadBalancer" + builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to + the same endpoints as the clusterIP. "ExternalName" + aliases this service to the specified externalName. + Several other fields do not apply to ExternalName + services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types' + type: string + type: object + type: object + tls: + description: TLS defines options for configuring TLS for HTTP. + properties: + certificate: + description: "Certificate is a reference to a Kubernetes + secret that contains the certificate and private key for + enabling TLS. The referenced secret should contain the + following: \n - `ca.crt`: The certificate authority (optional). + - `tls.crt`: The certificate (or a chain). - `tls.key`: + The private key to the first certificate in the certificate + chain." + properties: + secretName: + description: SecretName is the name of the secret. 
+ type: string + type: object + selfSignedCertificate: + description: SelfSignedCertificate allows configuring the + self-signed certificate generated by the operator. + properties: + disabled: + description: Disabled indicates that the provisioning + of the self-signed certifcate should be disabled. + type: boolean + subjectAltNames: + description: SubjectAlternativeNames is a list of SANs + to include in the generated HTTP TLS certificate. + items: + description: SubjectAlternativeName represents a SAN + entry in a x509 certificate. + properties: + dns: + description: DNS is the DNS name of the subject. + type: string + ip: + description: IP is the IP address of the subject. + type: string + type: object + type: array + type: object + type: object + type: object + type: array + version: + description: Version of the Logstash. + type: string + required: + - version + type: object + status: + description: LogstashStatus defines the observed state of Logstash + properties: + availableNodes: + format: int32 + type: integer + elasticsearchAssociationsStatus: + additionalProperties: + description: AssociationStatus is the status of an association resource. + type: string + description: ElasticsearchAssociationStatus is the status of any auto-linking + to Elasticsearch clusters. + type: object + expectedNodes: + format: int32 + type: integer + monitoringAssociationStatus: + additionalProperties: + description: AssociationStatus is the status of an association resource. + type: string + description: MonitoringAssociationStatus is the status of any auto-linking + to monitoring Elasticsearch clusters. + type: object + observedGeneration: + description: ObservedGeneration is the most recent generation observed + for this Logstash instance. It corresponds to the metadata generation, + which is updated on mutation by the API Server. If the generation + observed in status diverges from the generation in metadata, the + Logstash controller has not yet processed the changes contained + in the Logstash specification. + format: int64 + type: integer + version: + description: 'Version of the stack resource currently running. During + version upgrades, multiple versions may run in parallel: this value + specifies the lowest version currently running.' + type: string + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.count + statusReplicasPath: .status.count + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.11.4 diff --git a/deploy/eck-operator/templates/_helpers.tpl b/deploy/eck-operator/templates/_helpers.tpl index 424dd0be1f..8c421f7b55 100644 --- a/deploy/eck-operator/templates/_helpers.tpl +++ b/deploy/eck-operator/templates/_helpers.tpl @@ -310,6 +310,19 @@ updating docs/operating-eck/eck-permissions.asciidoc file. 
- create - update - patch +- apiGroups: + - logstash.k8s.elastic.co + resources: + - logstashes + - logstashes/status + - logstashes/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP + verbs: + - get + - list + - watch + - create + - update + - patch {{- end -}} {{/* diff --git a/deploy/eck-operator/templates/cluster-roles.yaml b/deploy/eck-operator/templates/cluster-roles.yaml index c6fa56cf71..1b623f37fe 100644 --- a/deploy/eck-operator/templates/cluster-roles.yaml +++ b/deploy/eck-operator/templates/cluster-roles.yaml @@ -50,6 +50,9 @@ rules: - apiGroups: ["stackconfigpolicy.k8s.elastic.co"] resources: ["stackconfigpolicies"] verbs: ["get", "list", "watch"] + - apiGroups: ["logstash.k8s.elastic.co"] + resources: ["logstashes"] + verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -87,4 +90,7 @@ rules: - apiGroups: ["stackconfigpolicy.k8s.elastic.co"] resources: ["stackconfigpolicies"] verbs: ["create", "delete", "deletecollection", "patch", "update"] + - apiGroups: ["logstash.k8s.elastic.co"] + resources: ["logstashes"] + verbs: ["create", "delete", "deletecollection", "patch", "update"] {{- end -}} diff --git a/docs/reference/api-docs.asciidoc b/docs/reference/api-docs.asciidoc index c2d94ffc7f..84b20cbda0 100644 --- a/docs/reference/api-docs.asciidoc +++ b/docs/reference/api-docs.asciidoc @@ -26,6 +26,7 @@ endif::[] - xref:{anchor_prefix}-enterprisesearch-k8s-elastic-co-v1beta1[$$enterprisesearch.k8s.elastic.co/v1beta1$$] - xref:{anchor_prefix}-kibana-k8s-elastic-co-v1[$$kibana.k8s.elastic.co/v1$$] - xref:{anchor_prefix}-kibana-k8s-elastic-co-v1beta1[$$kibana.k8s.elastic.co/v1beta1$$] +- xref:{anchor_prefix}-logstash-k8s-elastic-co-v1alpha1[$$logstash.k8s.elastic.co/v1alpha1$$] - xref:{anchor_prefix}-maps-k8s-elastic-co-v1alpha1[$$maps.k8s.elastic.co/v1alpha1$$] - xref:{anchor_prefix}-stackconfigpolicy-k8s-elastic-co-v1alpha1[$$stackconfigpolicy.k8s.elastic.co/v1alpha1$$] @@ -448,6 +449,7 @@ Config represents untyped YAML configuration. - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-enterprisesearch-v1beta1-enterprisesearchspec[$$EnterpriseSearchSpec$$] - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-stackconfigpolicy-v1alpha1-indextemplates[$$IndexTemplates$$] - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-kibana-v1-kibanaspec[$$KibanaSpec$$] +- xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-logstash-v1alpha1-logstashspec[$$LogstashSpec$$] - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-maps-v1alpha1-mapsspec[$$MapsSpec$$] - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-elasticsearch-v1-nodeset[$$NodeSet$$] **** @@ -465,6 +467,7 @@ ConfigSource references configuration settings. 
- xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-beat-v1beta1-beatspec[$$BeatSpec$$] - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-enterprisesearch-v1-enterprisesearchspec[$$EnterpriseSearchSpec$$] - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-enterprisesearch-v1beta1-enterprisesearchspec[$$EnterpriseSearchSpec$$] +- xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-logstash-v1alpha1-logstashspec[$$LogstashSpec$$] - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-maps-v1alpha1-mapsspec[$$MapsSpec$$] **** @@ -587,6 +590,7 @@ Monitoring holds references to both the metrics, and logs Elasticsearch clusters - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-beat-v1beta1-beatspec[$$BeatSpec$$] - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-elasticsearch-v1-elasticsearchspec[$$ElasticsearchSpec$$] - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-kibana-v1-kibanaspec[$$KibanaSpec$$] +- xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-logstash-v1alpha1-logstashspec[$$LogstashSpec$$] **** [cols="25a,75a", options="header"] @@ -607,6 +611,7 @@ ObjectSelector defines a reference to a Kubernetes object which can be an Elasti - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-agent-v1alpha1-agentspec[$$AgentSpec$$] - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-apm-v1-apmserverspec[$$ApmServerSpec$$] - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-beat-v1beta1-beatspec[$$BeatSpec$$] +- xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-logstash-v1alpha1-elasticsearchcluster[$$ElasticsearchCluster$$] - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-enterprisesearch-v1-enterprisesearchspec[$$EnterpriseSearchSpec$$] - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-enterprisesearch-v1beta1-enterprisesearchspec[$$EnterpriseSearchSpec$$] - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-kibana-v1-kibanaspec[$$KibanaSpec$$] @@ -678,6 +683,7 @@ SecretSource defines a data source based on a Kubernetes Secret. - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-beat-v1beta1-beatspec[$$BeatSpec$$] - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-elasticsearch-v1-elasticsearchspec[$$ElasticsearchSpec$$] - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-kibana-v1-kibanaspec[$$KibanaSpec$$] +- xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-logstash-v1alpha1-logstashspec[$$LogstashSpec$$] - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-stackconfigpolicy-v1alpha1-stackconfigpolicyspec[$$StackConfigPolicySpec$$] **** @@ -715,6 +721,7 @@ ServiceTemplate defines the template for a Kubernetes Service. .Appears In: **** - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-common-v1-httpconfig[$$HTTPConfig$$] +- xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-logstash-v1alpha1-logstashservice[$$LogstashService$$] - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-elasticsearch-v1-transportconfig[$$TransportConfig$$] **** @@ -754,6 +761,7 @@ TLSOptions holds TLS configuration options. 
.Appears In: **** - xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-common-v1-httpconfig[$$HTTPConfig$$] +- xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-logstash-v1alpha1-logstashservice[$$LogstashService$$] **** [cols="25a,75a", options="header"] @@ -1824,6 +1832,123 @@ KibanaSpec holds the specification of a Kibana instance. +[id="{anchor_prefix}-logstash-k8s-elastic-co-v1alpha1"] +== logstash.k8s.elastic.co/v1alpha1 + +Package v1alpha1 contains API Schema definitions for the logstash v1alpha1 API group + +.Resource Types +- xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-logstash-v1alpha1-logstash[$$Logstash$$] + + + +[id="{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-logstash-v1alpha1-elasticsearchcluster"] +=== ElasticsearchCluster + +ElasticsearchCluster is a named reference to an Elasticsearch cluster which can be used in a Logstash pipeline. + +.Appears In: +**** +- xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-logstash-v1alpha1-logstashspec[$$LogstashSpec$$] +**** + +[cols="25a,75a", options="header"] +|=== +| Field | Description +| *`ObjectSelector`* __xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-common-v1-objectselector[$$ObjectSelector$$]__ | +| *`clusterName`* __string__ | ClusterName is an alias for the cluster to be used to refer to the Elasticsearch cluster in Logstash configuration files, and will be used to identify "named clusters" in Logstash +|=== + + +[id="{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-logstash-v1alpha1-logstash"] +=== Logstash + +Logstash is the Schema for the logstashes API + + + +[cols="25a,75a", options="header"] +|=== +| Field | Description +| *`apiVersion`* __string__ | `logstash.k8s.elastic.co/v1alpha1` +| *`kind`* __string__ | `Logstash` +| *`metadata`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#objectmeta-v1-meta[$$ObjectMeta$$]__ | Refer to Kubernetes API documentation for fields of `metadata`. + +| *`spec`* __xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-logstash-v1alpha1-logstashspec[$$LogstashSpec$$]__ | +| *`status`* __xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-logstash-v1alpha1-logstashstatus[$$LogstashStatus$$]__ | +|=== + + +[id="{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-logstash-v1alpha1-logstashservice"] +=== LogstashService + + + +.Appears In: +**** +- xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-logstash-v1alpha1-logstashspec[$$LogstashSpec$$] +**** + +[cols="25a,75a", options="header"] +|=== +| Field | Description +| *`name`* __string__ | +| *`service`* __xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-common-v1-servicetemplate[$$ServiceTemplate$$]__ | Service defines the template for the associated Kubernetes Service object. +| *`tls`* __xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-common-v1-tlsoptions[$$TLSOptions$$]__ | TLS defines options for configuring TLS for HTTP. +|=== + + +[id="{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-logstash-v1alpha1-logstashspec"] +=== LogstashSpec + +LogstashSpec defines the desired state of Logstash + +.Appears In: +**** +- xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-logstash-v1alpha1-logstash[$$Logstash$$] +**** + +[cols="25a,75a", options="header"] +|=== +| Field | Description +| *`version`* __string__ | Version of the Logstash. 
+| *`count`* __integer__ | +| *`image`* __string__ | Image is the Logstash Docker image to deploy. Version and Type have to match the Logstash in the image. +| *`elasticsearchRefs`* __xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-logstash-v1alpha1-elasticsearchcluster[$$ElasticsearchCluster$$] array__ | ElasticsearchRefs are references to Elasticsearch clusters running in the same Kubernetes cluster. +| *`config`* __xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-common-v1-config[$$Config$$]__ | Config holds the Logstash configuration. At most one of [`Config`, `ConfigRef`] can be specified. +| *`configRef`* __xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-common-v1-configsource[$$ConfigSource$$]__ | ConfigRef contains a reference to an existing Kubernetes Secret holding the Logstash configuration. Logstash settings must be specified as yaml, under a single "logstash.yml" entry. At most one of [`Config`, `ConfigRef`] can be specified. +| *`pipelines`* __xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-common-v1-config[$$Config$$] array__ | Pipelines holds the Logstash Pipelines. At most one of [`Pipelines`, `PipelinesRef`] can be specified. +| *`pipelinesRef`* __xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-common-v1-configsource[$$ConfigSource$$]__ | PipelinesRef contains a reference to an existing Kubernetes Secret holding the Logstash Pipelines. Logstash pipelines must be specified as yaml, under a single "pipelines.yml" entry. At most one of [`Pipelines`, `PipelinesRef`] can be specified. +| *`services`* __xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-logstash-v1alpha1-logstashservice[$$LogstashService$$] array__ | Services contains details of services that Logstash should expose - similar to the HTTP layer configuration for the rest of the stack, but also applicable for more use cases than the metrics API, as logstash may need to be opened up for other services: Beats, TCP, UDP, etc, inputs. +| *`monitoring`* __xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-common-v1-monitoring[$$Monitoring$$]__ | Monitoring enables you to collect and ship log and monitoring data of this Logstash. Metricbeat and Filebeat are deployed in the same Pod as sidecars and each one sends data to one or two different Elasticsearch monitoring clusters running in the same Kubernetes cluster. +| *`podTemplate`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podtemplatespec-v1-core[$$PodTemplateSpec$$]__ | PodTemplate provides customisation options for the Logstash pods. +| *`revisionHistoryLimit`* __integer__ | RevisionHistoryLimit is the number of revisions to retain to allow rollback in the underlying StatefulSet. +| *`secureSettings`* __xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-common-v1-secretsource[$$SecretSource$$] array__ | SecureSettings is a list of references to Kubernetes Secrets containing sensitive configuration options for the Logstash. Secrets data can be then referenced in the Logstash config using the Secret's keys or as specified in `Entries` field of each SecureSetting. +| *`serviceAccountName`* __string__ | ServiceAccountName is used to check access from the current resource to Elasticsearch resource in a different namespace. Can only be used if ECK is enforcing RBAC on references. 
+|=== + + +[id="{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-logstash-v1alpha1-logstashstatus"] +=== LogstashStatus + +LogstashStatus defines the observed state of Logstash + +.Appears In: +**** +- xref:{anchor_prefix}-github-com-elastic-cloud-on-k8s-v2-pkg-apis-logstash-v1alpha1-logstash[$$Logstash$$] +**** + +[cols="25a,75a", options="header"] +|=== +| Field | Description +| *`version`* __string__ | Version of the stack resource currently running. During version upgrades, multiple versions may run in parallel: this value specifies the lowest version currently running. +| *`expectedNodes`* __integer__ | +| *`availableNodes`* __integer__ | +| *`observedGeneration`* __integer__ | ObservedGeneration is the most recent generation observed for this Logstash instance. It corresponds to the metadata generation, which is updated on mutation by the API Server. If the generation observed in status diverges from the generation in metadata, the Logstash controller has not yet processed the changes contained in the Logstash specification. +|=== + + + [id="{anchor_prefix}-maps-k8s-elastic-co-v1alpha1"] == maps.k8s.elastic.co/v1alpha1 diff --git a/hack/api-docs/config.yaml b/hack/api-docs/config.yaml index ec20aa7197..0e09c97da0 100644 --- a/hack/api-docs/config.yaml +++ b/hack/api-docs/config.yaml @@ -1,6 +1,6 @@ processor: ignoreTypes: - - "(Elasticsearch|ElasticsearchAutoscaler|Kibana|ApmServer|EnterpriseSearch|Beat|Agent|StackConfigPolicy)List$" + - "(Elasticsearch|ElasticsearchAutoscaler|Kibana|ApmServer|EnterpriseSearch|Beat|Agent|StackConfigPolicy|Logstash)List$" - "(Kibana|ApmServer|EnterpriseSearch|Beat|Agent|StackConfigPolicy)Health$" - "(ElasticsearchAutoscaler|Kibana|ApmServer|Reconciler|EnterpriseSearch|Beat|Agent|Maps|Policy)Status$" - "ElasticsearchSettings$" diff --git a/hack/operatorhub/config.yaml b/hack/operatorhub/config.yaml index 358a607f38..bc9ea8345e 100644 --- a/hack/operatorhub/config.yaml +++ b/hack/operatorhub/config.yaml @@ -30,6 +30,9 @@ crds: - name: stackconfigpolicies.stackconfigpolicy.k8s.elastic.co displayName: Elastic Stack Config Policy description: Elastic Stack Config Policy + - name: logstashes.logstash.k8s.elastic.co + displayName: Logstash + description: Logstash instance packages: - outputPath: community-operators packageName: elastic-cloud-eck diff --git a/hack/operatorhub/templates/csv.tpl b/hack/operatorhub/templates/csv.tpl index 643d49611c..730531dced 100644 --- a/hack/operatorhub/templates/csv.tpl +++ b/hack/operatorhub/templates/csv.tpl @@ -7,7 +7,7 @@ metadata: certified: 'false' containerImage: {{ .OperatorRepo }}{{ .Tag }} createdAt: {{ now | date "2006-01-02 15:04:05" }} - description: Run Elasticsearch, Kibana, APM Server, Beats, Enterprise Search, Elastic Agent and Elastic Maps Server on Kubernetes and OpenShift + description: Run Elasticsearch, Kibana, APM Server, Beats, Enterprise Search, Elastic Agent, Elastic Maps Server and Logstash on Kubernetes and OpenShift repository: https://github.com/elastic/cloud-on-k8s support: elastic.co alm-examples: |- @@ -226,6 +226,17 @@ metadata: "name": "elasticsearch-sample" } } + }, + { + "apiVersion": "logstash.k8s.elastic.co/v1alpha1", + "kind": "Logstash", + "metadata" : { + "name": "logstash-sample" + }, + "spec": { + "version": "{{ .StackVersion }}", + "count": 1 + } } ] name: {{ .PackageName }}.v{{ .NewVersion }} diff --git a/pkg/apis/beat/v1beta1/validations.go b/pkg/apis/beat/v1beta1/validations.go index 7f147ee7db..d42148bbb6 100644 --- a/pkg/apis/beat/v1beta1/validations.go +++ 
b/pkg/apis/beat/v1beta1/validations.go @@ -121,5 +121,5 @@ func checkAssociations(b *Beat) field.ErrorList { } func checkMonitoring(b *Beat) field.ErrorList { - return validations.Validate(b, b.Spec.Version) + return validations.Validate(b, b.Spec.Version, validations.MinStackVersion) } diff --git a/pkg/apis/common/v1/association.go b/pkg/apis/common/v1/association.go index 721055c273..8bcfaa2f23 100644 --- a/pkg/apis/common/v1/association.go +++ b/pkg/apis/common/v1/association.go @@ -110,6 +110,8 @@ const ( BeatAssociationType = "beat" BeatMonitoringAssociationType = "beat-monitoring" + LogstashMonitoringAssociationType = "ls-monitoring" + AssociationUnknown AssociationStatus = "" AssociationPending AssociationStatus = "Pending" AssociationEstablished AssociationStatus = "Established" diff --git a/pkg/apis/kibana/v1/webhook.go b/pkg/apis/kibana/v1/webhook.go index a304cfe946..bc0df62098 100644 --- a/pkg/apis/kibana/v1/webhook.go +++ b/pkg/apis/kibana/v1/webhook.go @@ -123,7 +123,7 @@ func checkNoDowngrade(prev, curr *Kibana) field.ErrorList { } func checkMonitoring(k *Kibana) field.ErrorList { - errs := validations.Validate(k, k.Spec.Version) + errs := validations.Validate(k, k.Spec.Version, validations.MinStackVersion) // Kibana must be associated to an Elasticsearch when monitoring metrics are enabled if monitoring.IsMetricsDefined(k) && !k.Spec.ElasticsearchRef.IsDefined() { errs = append(errs, field.Invalid(field.NewPath("spec").Child("elasticsearchRef"), k.Spec.ElasticsearchRef, diff --git a/pkg/apis/logstash/v1alpha1/doc.go b/pkg/apis/logstash/v1alpha1/doc.go new file mode 100644 index 0000000000..a92dd08678 --- /dev/null +++ b/pkg/apis/logstash/v1alpha1/doc.go @@ -0,0 +1,11 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +// Package v1alpha1 contains API Schema definitions for the logstash v1alpha1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/elastic/cloud-on-k8s/pkg/apis/logstash +// +k8s:defaulter-gen=TypeMeta +// +groupName=logstash.k8s.elastic.co +package v1alpha1 diff --git a/pkg/apis/logstash/v1alpha1/groupversion_info.go b/pkg/apis/logstash/v1alpha1/groupversion_info.go new file mode 100644 index 0000000000..72425bd52a --- /dev/null +++ b/pkg/apis/logstash/v1alpha1/groupversion_info.go @@ -0,0 +1,21 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "logstash.k8s.elastic.co", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme is required by pkg/client/... + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/pkg/apis/logstash/v1alpha1/labels.go b/pkg/apis/logstash/v1alpha1/labels.go new file mode 100644 index 0000000000..9d4a22f515 --- /dev/null +++ b/pkg/apis/logstash/v1alpha1/labels.go @@ -0,0 +1,17 @@ +// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package v1alpha1 + +import ( + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" +) + +// GetIdentityLabels will return the common Elastic assigned labels for Logstash +func (logstash *Logstash) GetIdentityLabels() map[string]string { + return map[string]string{ + commonv1.TypeLabelName: "logstash", + "logstash.k8s.elastic.co/name": logstash.Name, + } +} diff --git a/pkg/apis/logstash/v1alpha1/logstash_types.go b/pkg/apis/logstash/v1alpha1/logstash_types.go new file mode 100644 index 0000000000..e0afea1fb7 --- /dev/null +++ b/pkg/apis/logstash/v1alpha1/logstash_types.go @@ -0,0 +1,381 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package v1alpha1 + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" +) + +const ( + LogstashContainerName = "logstash" + // Kind is inferred from the struct name using reflection in SchemeBuilder.Register() + // we duplicate it as a constant here for practical purposes. + Kind = "Logstash" +) + +// LogstashSpec defines the desired state of Logstash +type LogstashSpec struct { + // Version of the Logstash. + Version string `json:"version"` + + Count int32 `json:"count,omitempty"` + + // Image is the Logstash Docker image to deploy. Version and Type have to match the Logstash in the image. + // +kubebuilder:validation:Optional + Image string `json:"image,omitempty"` + + // ElasticsearchRefs are references to Elasticsearch clusters running in the same Kubernetes cluster. + // +kubebuilder:validation:Optional + ElasticsearchRefs []ElasticsearchCluster `json:"elasticsearchRefs,omitempty"` + + // Config holds the Logstash configuration. At most one of [`Config`, `ConfigRef`] can be specified. + // +kubebuilder:validation:Optional + // +kubebuilder:pruning:PreserveUnknownFields + Config *commonv1.Config `json:"config,omitempty"` + + // ConfigRef contains a reference to an existing Kubernetes Secret holding the Logstash configuration. + // Logstash settings must be specified as yaml, under a single "logstash.yml" entry. At most one of [`Config`, `ConfigRef`] + // can be specified. + // +kubebuilder:validation:Optional + ConfigRef *commonv1.ConfigSource `json:"configRef,omitempty"` + + // Pipelines holds the Logstash Pipelines. At most one of [`Pipelines`, `PipelinesRef`] can be specified. + // +kubebuilder:validation:Optional + // +kubebuilder:pruning:PreserveUnknownFields + Pipelines []commonv1.Config `json:"pipelines,omitempty"` + + // PipelinesRef contains a reference to an existing Kubernetes Secret holding the Logstash Pipelines. + // Logstash pipelines must be specified as yaml, under a single "pipelines.yml" entry. At most one of [`Pipelines`, `PipelinesRef`] + // can be specified. 
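+	// For example (illustrative Secret name), such a Secret could be created with:
+	//   kubectl create secret generic logstash-pipelines --from-file=pipelines.yml=./pipelines.yml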
+ // +kubebuilder:validation:Optional + PipelinesRef *commonv1.ConfigSource `json:"pipelinesRef,omitempty"` + + // Services contains details of services that Logstash should expose - similar to the HTTP layer configuration for the + // rest of the stack, but also applicable for more use cases than the metrics API, as logstash may need to + // be opened up for other services: Beats, TCP, UDP, etc, inputs. + // +kubebuilder:validation:Optional + Services []LogstashService `json:"services,omitempty"` + + // Monitoring enables you to collect and ship log and monitoring data of this Logstash. + // Metricbeat and Filebeat are deployed in the same Pod as sidecars and each one sends data to one or two different + // Elasticsearch monitoring clusters running in the same Kubernetes cluster. + // +kubebuilder:validation:Optional + Monitoring commonv1.Monitoring `json:"monitoring,omitempty"` + + // PodTemplate provides customisation options for the Logstash pods. + // +kubebuilder:pruning:PreserveUnknownFields + PodTemplate corev1.PodTemplateSpec `json:"podTemplate,omitempty"` + + // RevisionHistoryLimit is the number of revisions to retain to allow rollback in the underlying StatefulSet. + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` + + // SecureSettings is a list of references to Kubernetes Secrets containing sensitive configuration options for the Logstash. + // Secrets data can be then referenced in the Logstash config using the Secret's keys or as specified in `Entries` field of + // each SecureSetting. + // +kubebuilder:validation:Optional + SecureSettings []commonv1.SecretSource `json:"secureSettings,omitempty"` + + // ServiceAccountName is used to check access from the current resource to Elasticsearch resource in a different namespace. + // Can only be used if ECK is enforcing RBAC on references. + // +kubebuilder:validation:Optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` +} + +type LogstashService struct { + Name string `json:"name,omitempty"` + // Service defines the template for the associated Kubernetes Service object. + Service commonv1.ServiceTemplate `json:"service,omitempty"` + // TLS defines options for configuring TLS for HTTP. + TLS commonv1.TLSOptions `json:"tls,omitempty"` +} + +// ElasticsearchCluster is a named reference to an Elasticsearch cluster which can be used in a Logstash pipeline. +type ElasticsearchCluster struct { + commonv1.ObjectSelector `json:",omitempty,inline"` + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // ClusterName is an alias for the cluster to be used to refer to the Elasticsearch cluster in Logstash + // configuration files, and will be used to identify "named clusters" in Logstash + ClusterName string `json:"clusterName,omitempty"` +} + +// LogstashStatus defines the observed state of Logstash +type LogstashStatus struct { + // Version of the stack resource currently running. During version upgrades, multiple versions may run + // in parallel: this value specifies the lowest version currently running. + Version string `json:"version,omitempty"` + + // +kubebuilder:validation:Optional + ExpectedNodes int32 `json:"expectedNodes,omitempty"` + // +kubebuilder:validation:Optional + AvailableNodes int32 `json:"availableNodes,omitempty"` + + // ObservedGeneration is the most recent generation observed for this Logstash instance. + // It corresponds to the metadata generation, which is updated on mutation by the API Server. 
+ // If the generation observed in status diverges from the generation in metadata, the Logstash + // controller has not yet processed the changes contained in the Logstash specification. + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // ElasticsearchAssociationStatus is the status of any auto-linking to Elasticsearch clusters. + ElasticsearchAssociationsStatus commonv1.AssociationStatusMap `json:"elasticsearchAssociationsStatus,omitempty"` + + // MonitoringAssociationStatus is the status of any auto-linking to monitoring Elasticsearch clusters. + MonitoringAssociationStatus commonv1.AssociationStatusMap `json:"monitoringAssociationStatus,omitempty"` +} + +// +kubebuilder:object:root=true + +// Logstash is the Schema for the logstashes API +// +k8s:openapi-gen=true +// +kubebuilder:resource:categories=elastic,shortName=ls +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="available",type="integer",JSONPath=".status.availableNodes",description="Available nodes" +// +kubebuilder:printcolumn:name="expected",type="integer",JSONPath=".status.expectedNodes",description="Expected nodes" +// +kubebuilder:printcolumn:name="age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="version",type="string",JSONPath=".status.version",description="Logstash version" +// +kubebuilder:subresource:scale:specpath=.spec.count,statuspath=.status.count,selectorpath=.status.selector +// +kubebuilder:storageversion +type Logstash struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec LogstashSpec `json:"spec,omitempty"` + Status LogstashStatus `json:"status,omitempty"` + EsAssocConfs map[commonv1.ObjectSelector]commonv1.AssociationConf `json:"-"` + MonitoringAssocConfs map[commonv1.ObjectSelector]commonv1.AssociationConf `json:"-"` +} + +// +kubebuilder:object:root=true + +// LogstashList contains a list of Logstash +type LogstashList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Logstash `json:"items"` +} + +func (l *Logstash) ElasticsearchRefs() []commonv1.ObjectSelector { + refs := make([]commonv1.ObjectSelector, len(l.Spec.ElasticsearchRefs)) + for i, r := range l.Spec.ElasticsearchRefs { + refs[i] = r.ObjectSelector + } + return refs +} +func (l *Logstash) ServiceAccountName() string { + return l.Spec.ServiceAccountName +} + +func (l *Logstash) SecureSettings() []commonv1.SecretSource { + return l.Spec.SecureSettings +} + +// IsMarkedForDeletion returns true if the Logstash is going to be deleted +func (l *Logstash) IsMarkedForDeletion() bool { + return !l.DeletionTimestamp.IsZero() +} + +// GetObservedGeneration will return the observedGeneration from the Elastic Logstash's status. 
+func (l *Logstash) GetObservedGeneration() int64 { + return l.Status.ObservedGeneration +} + +func (l *Logstash) GetAssociations() []commonv1.Association { + associations := make( + []commonv1.Association, + 0, + len(l.Spec.ElasticsearchRefs)+len(l.Spec.Monitoring.Metrics.ElasticsearchRefs)+len(l.Spec.Monitoring.Logs.ElasticsearchRefs), + ) + + for _, ref := range l.Spec.ElasticsearchRefs { + associations = append(associations, &LogstashESAssociation{ + Logstash: l, + ElasticsearchCluster: ElasticsearchCluster{ + ObjectSelector: ref.WithDefaultNamespace(l.Namespace), + ClusterName: ref.ClusterName, + }, + }) + } + + for _, ref := range l.Spec.Monitoring.Metrics.ElasticsearchRefs { + if ref.IsDefined() { + associations = append(associations, &LogstashMonitoringAssociation{ + Logstash: l, + ref: ref.WithDefaultNamespace(l.Namespace), + }) + } + } + for _, ref := range l.Spec.Monitoring.Logs.ElasticsearchRefs { + if ref.IsDefined() { + associations = append(associations, &LogstashMonitoringAssociation{ + Logstash: l, + ref: ref.WithDefaultNamespace(l.Namespace), + }) + } + } + + return associations +} + +func (l *Logstash) AssociationStatusMap(typ commonv1.AssociationType) commonv1.AssociationStatusMap { + switch typ { + case commonv1.ElasticsearchAssociationType: + if len(l.Spec.ElasticsearchRefs) > 0 { + return l.Status.ElasticsearchAssociationsStatus + } + case commonv1.LogstashMonitoringAssociationType: + for _, esRef := range l.Spec.Monitoring.Metrics.ElasticsearchRefs { + if esRef.IsDefined() { + return l.Status.MonitoringAssociationStatus + } + } + for _, esRef := range l.Spec.Monitoring.Logs.ElasticsearchRefs { + if esRef.IsDefined() { + return l.Status.MonitoringAssociationStatus + } + } + } + + return commonv1.AssociationStatusMap{} +} + +func (l *Logstash) SetAssociationStatusMap(typ commonv1.AssociationType, status commonv1.AssociationStatusMap) error { + switch typ { + case commonv1.ElasticsearchAssociationType: + l.Status.ElasticsearchAssociationsStatus = status + return nil + case commonv1.LogstashMonitoringAssociationType: + l.Status.MonitoringAssociationStatus = status + return nil + default: + return fmt.Errorf("association type %s not known", typ) + } +} + +type LogstashESAssociation struct { + // The associated Logstash + *Logstash + ElasticsearchCluster +} + +var _ commonv1.Association = &LogstashESAssociation{} + +func (lses *LogstashESAssociation) ElasticServiceAccount() (commonv1.ServiceAccountName, error) { + return "", nil +} + +func (lses *LogstashESAssociation) Associated() commonv1.Associated { + if lses == nil { + return nil + } + if lses.Logstash == nil { + lses.Logstash = &Logstash{} + } + return lses.Logstash +} + +func (lses *LogstashESAssociation) AssociationType() commonv1.AssociationType { + return commonv1.ElasticsearchAssociationType +} + +func (lses *LogstashESAssociation) AssociationRef() commonv1.ObjectSelector { + return lses.ElasticsearchCluster.ObjectSelector +} + +func (lses *LogstashESAssociation) AssociationConfAnnotationName() string { + return commonv1.ElasticsearchConfigAnnotationName(lses.ElasticsearchCluster.ObjectSelector) +} + +func (lses *LogstashESAssociation) AssociationConf() (*commonv1.AssociationConf, error) { + return commonv1.GetAndSetAssociationConfByRef(lses, lses.ElasticsearchCluster.ObjectSelector, lses.EsAssocConfs) +} + +func (lses *LogstashESAssociation) SetAssociationConf(conf *commonv1.AssociationConf) { + if lses.EsAssocConfs == nil { + lses.EsAssocConfs = make(map[commonv1.ObjectSelector]commonv1.AssociationConf) + } + 
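+	// Only keep a copy of the configuration when it is non-nil; a nil conf leaves any existing entry untouched.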
if conf != nil { + lses.EsAssocConfs[lses.ElasticsearchCluster.ObjectSelector] = *conf + } +} + +func (lses *LogstashESAssociation) AssociationID() string { + return fmt.Sprintf("%s-%s", lses.ElasticsearchCluster.ObjectSelector.Namespace, lses.ElasticsearchCluster.ObjectSelector.NameOrSecretName()) +} + +type LogstashMonitoringAssociation struct { + // The associated Logstash + *Logstash + // ref is the object selector of the monitoring Elasticsearch referenced in the Association + ref commonv1.ObjectSelector +} + +var _ commonv1.Association = &LogstashMonitoringAssociation{} + +func (lsmon *LogstashMonitoringAssociation) ElasticServiceAccount() (commonv1.ServiceAccountName, error) { + return "", nil +} + +func (lsmon *LogstashMonitoringAssociation) Associated() commonv1.Associated { + if lsmon == nil { + return nil + } + if lsmon.Logstash == nil { + lsmon.Logstash = &Logstash{} + } + return lsmon.Logstash +} + +func (lsmon *LogstashMonitoringAssociation) AssociationConfAnnotationName() string { + return commonv1.ElasticsearchConfigAnnotationName(lsmon.ref) +} + +func (lsmon *LogstashMonitoringAssociation) AssociationType() commonv1.AssociationType { + return commonv1.LogstashMonitoringAssociationType +} + +func (lsmon *LogstashMonitoringAssociation) AssociationRef() commonv1.ObjectSelector { + return lsmon.ref +} + +func (lsmon *LogstashMonitoringAssociation) AssociationConf() (*commonv1.AssociationConf, error) { + return commonv1.GetAndSetAssociationConfByRef(lsmon, lsmon.ref, lsmon.MonitoringAssocConfs) +} + +func (lsmon *LogstashMonitoringAssociation) SetAssociationConf(assocConf *commonv1.AssociationConf) { + if lsmon.MonitoringAssocConfs == nil { + lsmon.MonitoringAssocConfs = make(map[commonv1.ObjectSelector]commonv1.AssociationConf) + } + if assocConf != nil { + lsmon.MonitoringAssocConfs[lsmon.ref] = *assocConf + } +} + +func (lsmon *LogstashMonitoringAssociation) AssociationID() string { + return lsmon.ref.ToID() +} + +func (l *Logstash) GetMonitoringMetricsRefs() []commonv1.ObjectSelector { + return l.Spec.Monitoring.Metrics.ElasticsearchRefs +} + +func (l *Logstash) GetMonitoringLogsRefs() []commonv1.ObjectSelector { + return l.Spec.Monitoring.Logs.ElasticsearchRefs +} + +func (l *Logstash) MonitoringAssociation(esRef commonv1.ObjectSelector) commonv1.Association { + return &LogstashMonitoringAssociation{ + Logstash: l, + ref: esRef.WithDefaultNamespace(l.Namespace), + } +} + +func init() { + SchemeBuilder.Register(&Logstash{}, &LogstashList{}) +} diff --git a/pkg/apis/logstash/v1alpha1/name.go b/pkg/apis/logstash/v1alpha1/name.go new file mode 100644 index 0000000000..8b93b2cf3a --- /dev/null +++ b/pkg/apis/logstash/v1alpha1/name.go @@ -0,0 +1,41 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package v1alpha1 + +import ( + common_name "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/name" +) + +const ( + apiServiceSuffix = "api" + configSuffix = "config" + pipelineSuffix = "pipeline" +) + +// Namer is a Namer that is configured with the defaults for resources related to a Logstash resource. +var Namer = common_name.NewNamer("ls") + +// ConfigSecretName returns the name of a secret used to storage Logstash configuration data. +func ConfigSecretName(name string) string { + return Namer.Suffix(name, configSuffix) +} + +// Name returns the name of Logstash. 
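+// For example, Name("sample") returns "sample-ls".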
+func Name(name string) string { + return Namer.Suffix(name) +} + +// APIServiceName returns the name of the HTTP service for a given Logstash name. +func APIServiceName(name string) string { + return Namer.Suffix(name, apiServiceSuffix) +} + +func UserServiceName(deployName string, name string) string { + return Namer.Suffix(deployName, name) +} + +func PipelineSecretName(name string) string { + return Namer.Suffix(name, pipelineSuffix) +} diff --git a/pkg/apis/logstash/v1alpha1/name_test.go b/pkg/apis/logstash/v1alpha1/name_test.go new file mode 100644 index 0000000000..2ea1267e34 --- /dev/null +++ b/pkg/apis/logstash/v1alpha1/name_test.go @@ -0,0 +1,81 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package v1alpha1 + +import ( + "testing" +) + +func TestAPIService(t *testing.T) { + type args struct { + logstashName string + } + tests := []struct { + name string + args args + want string + }{ + { + name: "sample", + args: args{logstashName: "sample"}, + want: "sample-ls-api", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := APIServiceName(tt.args.logstashName); got != tt.want { + t.Errorf("DefaultService() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestConfigSecretName(t *testing.T) { + type args struct { + logstashName string + } + tests := []struct { + name string + args args + want string + }{ + { + name: "sample", + args: args{logstashName: "sample"}, + want: "sample-ls-config", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ConfigSecretName(tt.args.logstashName); got != tt.want { + t.Errorf("ConfigSecret() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestLogstashName(t *testing.T) { + type args struct { + logstashName string + } + tests := []struct { + name string + args args + want string + }{ + { + name: "sample", + args: args{logstashName: "sample"}, + want: "sample-ls", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := Name(tt.args.logstashName); got != tt.want { + t.Errorf("Logstash Name() = %v, want %v", got, tt.want) + } + }) + } +} \ No newline at end of file diff --git a/pkg/apis/logstash/v1alpha1/validations.go b/pkg/apis/logstash/v1alpha1/validations.go new file mode 100644 index 0000000000..da863cf19b --- /dev/null +++ b/pkg/apis/logstash/v1alpha1/validations.go @@ -0,0 +1,107 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package v1alpha1 + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/validation/field" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/stackmon/validations" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/version" +) + +var ( + // MinStackMonVersion is the minimum version of Logstash to enable Stack Monitoring on an Elastic Stack application. + // This requirement comes from the fact that we configure Logstash to write logs to disk for Filebeat + // via the env var LOG_STYLE available from this version. 
+ MinStackMonVersion = version.MustParse("8.7.0-SNAPSHOT") + + defaultChecks = []func(*Logstash) field.ErrorList{ + checkNoUnknownFields, + checkNameLength, + checkSupportedVersion, + checkSingleConfigSource, + checkESRefsNamed, + checkMonitoring, + checkAssociations, + checkSinglePipelineSource, + } + + updateChecks = []func(old, curr *Logstash) field.ErrorList{ + checkNoDowngrade, + } +) + +func checkNoUnknownFields(l *Logstash) field.ErrorList { + return commonv1.NoUnknownFields(l, l.ObjectMeta) +} + +func checkNameLength(l *Logstash) field.ErrorList { + return commonv1.CheckNameLength(l) +} + +func checkSupportedVersion(l *Logstash) field.ErrorList { + return commonv1.CheckSupportedStackVersion(l.Spec.Version, version.SupportedLogstashVersions) +} + +func checkNoDowngrade(prev, curr *Logstash) field.ErrorList { + if commonv1.IsConfiguredToAllowDowngrades(curr) { + return nil + } + return commonv1.CheckNoDowngrade(prev.Spec.Version, curr.Spec.Version) +} + +func checkSingleConfigSource(l *Logstash) field.ErrorList { + if l.Spec.Config != nil && l.Spec.ConfigRef != nil { + msg := "Specify at most one of [`config`, `configRef`], not both" + return field.ErrorList{ + field.Forbidden(field.NewPath("spec").Child("config"), msg), + field.Forbidden(field.NewPath("spec").Child("configRef"), msg), + } + } + + return nil +} + +func checkMonitoring(l *Logstash) field.ErrorList { + return validations.Validate(l, l.Spec.Version, MinStackMonVersion) +} + +func checkAssociations(l *Logstash) field.ErrorList { + monitoringPath := field.NewPath("spec").Child("monitoring") + err1 := commonv1.CheckAssociationRefs(monitoringPath.Child("metrics"), l.GetMonitoringMetricsRefs()...) + err2 := commonv1.CheckAssociationRefs(monitoringPath.Child("logs"), l.GetMonitoringLogsRefs()...) + err3 := commonv1.CheckAssociationRefs(field.NewPath("spec").Child("elasticsearchRefs"), l.ElasticsearchRefs()...) + return append(append(err1, err2...), err3...) +} + +func checkSinglePipelineSource(a *Logstash) field.ErrorList { + if a.Spec.Pipelines != nil && a.Spec.PipelinesRef != nil { + msg := "Specify at most one of [`pipelines`, `pipelinesRef`], not both" + return field.ErrorList{ + field.Forbidden(field.NewPath("spec").Child("pipelines"), msg), + field.Forbidden(field.NewPath("spec").Child("pipelinesRef"), msg), + } + } + + return nil +} + +func checkESRefsNamed(l *Logstash) field.ErrorList { + var errorList field.ErrorList + for i, esRef := range l.Spec.ElasticsearchRefs { + if esRef.ClusterName == "" { + errorList = append( + errorList, + field.Required( + field.NewPath("spec").Child("elasticsearchRefs").Index(i).Child("clusterName"), + fmt.Sprintf("clusterName is a mandatory field - missing on %v", esRef.NamespacedName())), + ) + } + } + return errorList +} diff --git a/pkg/apis/logstash/v1alpha1/validations_test.go b/pkg/apis/logstash/v1alpha1/validations_test.go new file mode 100644 index 0000000000..24851bfa75 --- /dev/null +++ b/pkg/apis/logstash/v1alpha1/validations_test.go @@ -0,0 +1,404 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+
+package v1alpha1
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+
+	commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1"
+)
+
+func TestCheckNameLength(t *testing.T) {
+	testCases := []struct {
+		name         string
+		logstashName string
+		wantErr      bool
+		wantErrMsg   string
+	}{
+		{
+			name:         "valid configuration",
+			logstashName: "test-logstash",
+			wantErr:      false,
+		},
+		{
+			name:         "long Logstash name",
+			logstashName: "extremely-long-winded-and-unnecessary-name-for-logstash",
+			wantErr:      true,
+			wantErrMsg:   "name exceeds maximum allowed length",
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			ls := Logstash{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      tc.logstashName,
+					Namespace: "test",
+				},
+				Spec: LogstashSpec{},
+			}
+
+			errList := checkNameLength(&ls)
+			assert.Equal(t, tc.wantErr, len(errList) > 0)
+		})
+	}
+}
+
+func TestCheckNoDowngrade(t *testing.T) {
+	type args struct {
+		prev *Logstash
+		curr *Logstash
+	}
+	tests := []struct {
+		name string
+		args args
+		want field.ErrorList
+	}{
+		{
+			name: "No downgrade",
+			args: args{
+				prev: &Logstash{Spec: LogstashSpec{Version: "7.17.0"}},
+				curr: &Logstash{Spec: LogstashSpec{Version: "8.6.1"}},
+			},
+			want: nil,
+		},
+		{
+			name: "Downgrade NOK",
+			args: args{
+				prev: &Logstash{Spec: LogstashSpec{Version: "8.6.1"}},
+				curr: &Logstash{Spec: LogstashSpec{Version: "8.5.0"}},
+			},
+			want: field.ErrorList{&field.Error{Type: field.ErrorTypeForbidden, Field: "spec.version", BadValue: "", Detail: "Version downgrades are not supported"}},
+		},
+		{
+			name: "Downgrade with override OK",
+			args: args{
+				prev: &Logstash{Spec: LogstashSpec{Version: "8.6.1"}},
+				curr: &Logstash{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{
+					commonv1.DisableDowngradeValidationAnnotation: "true",
+				}}, Spec: LogstashSpec{Version: "8.5.0"}},
+			},
+			want: nil,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			assert.Equalf(t, tt.want, checkNoDowngrade(tt.args.prev, tt.args.curr), "checkNoDowngrade(%v, %v)", tt.args.prev, tt.args.curr)
+		})
+	}
+}
+
+func Test_checkSingleConfigSource(t *testing.T) {
+	tests := []struct {
+		name     string
+		logstash Logstash
+		wantErr  bool
+	}{
+		{
+			name: "configRef absent, config present",
+			logstash: Logstash{
+				Spec: LogstashSpec{
+					Config: &commonv1.Config{},
+				},
+			},
+			wantErr: false,
+		},
+		{
+			name: "config absent, configRef present",
+			logstash: Logstash{
+				Spec: LogstashSpec{
+					ConfigRef: &commonv1.ConfigSource{},
+				},
+			},
+			wantErr: false,
+		},
+		{
+			name: "neither present",
+			logstash: Logstash{
+				Spec: LogstashSpec{},
+			},
+			wantErr: false,
+		},
+		{
+			name: "both present",
+			logstash: Logstash{
+				Spec: LogstashSpec{
+					Config:    &commonv1.Config{},
+					ConfigRef: &commonv1.ConfigSource{},
+				},
+			},
+			wantErr: true,
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			got := checkSingleConfigSource(&tc.logstash)
+			assert.Equal(t, tc.wantErr, len(got) > 0)
+		})
+	}
+}
+
+func Test_checkSinglePipelineSource(t *testing.T) {
+	tests := []struct {
+		name     string
+		logstash Logstash
+		wantErr  bool
+	}{
+		{
+			name: "pipelinesRef absent, pipelines present",
+			logstash: Logstash{
+				Spec: LogstashSpec{
+					Pipelines: []commonv1.Config{},
+				},
+			},
+			wantErr: false,
+		},
+		{
+			name: "pipelines absent, pipelinesRef present",
+			logstash: Logstash{
+				Spec: LogstashSpec{
+					PipelinesRef: &commonv1.ConfigSource{},
+				},
+			},
+			
wantErr: false, + }, + { + name: "neither present", + logstash: Logstash{ + Spec: LogstashSpec{}, + }, + wantErr: false, + }, + { + name: "both present", + logstash: Logstash{ + Spec: LogstashSpec{ + Pipelines: []commonv1.Config{}, + PipelinesRef: &commonv1.ConfigSource{}, + }, + }, + wantErr: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := checkSinglePipelineSource(&tc.logstash) + assert.Equal(t, tc.wantErr, len(got) > 0) + }) + } +} + +func Test_checkSupportedVersion(t *testing.T) { + for _, tt := range []struct { + name string + version string + wantErr bool + }{ + { + name: "below min supported", + version: "8.5.0", + wantErr: true, + }, + { + name: "above max supported", + version: "9.0.0", + wantErr: true, + }, + { + name: "above min supported", + version: "8.7.1", + wantErr: false, + }, + } { + t.Run(tt.name, func(t *testing.T) { + a := Logstash{ + Spec: LogstashSpec{ + Version: tt.version, + }, + } + got := checkSupportedVersion(&a) + assert.Equal(t, tt.wantErr, len(got) > 0) + }) + } +} + +func Test_checkEsRefsAssociations(t *testing.T) { + type args struct { + b *Logstash + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "no ref: OK", + args: args{ + b: &Logstash{}, + }, + wantErr: false, + }, + { + name: "mix secret named and named refs: OK", + args: args{ + b: &Logstash{ + Spec: LogstashSpec{ + ElasticsearchRefs: []ElasticsearchCluster{ + { + ObjectSelector: commonv1.ObjectSelector{SecretName: "bla"}, + ClusterName: "test", + }, + { + ObjectSelector: commonv1.ObjectSelector{Name: "bla", Namespace: "blub"}, + ClusterName: "test2", + }, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "secret named ref with a name: NOK", + args: args{ + b: &Logstash{ + Spec: LogstashSpec{ + ElasticsearchRefs: []ElasticsearchCluster{ + { + ObjectSelector: commonv1.ObjectSelector{SecretName: "bla", Name: "bla"}, + ClusterName: "test", + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "no name or secret name with namespace: NOK", + args: args{ + b: &Logstash{ + Spec: LogstashSpec{ + ElasticsearchRefs: []ElasticsearchCluster{ + { + ObjectSelector: commonv1.ObjectSelector{Namespace: "blub"}, + ClusterName: "test", + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "no name or secret name with serviceName: NOK", + args: args{ + b: &Logstash{ + Spec: LogstashSpec{ + ElasticsearchRefs: []ElasticsearchCluster{ + { + ObjectSelector: commonv1.ObjectSelector{ServiceName: "ble"}, + ClusterName: "test", + }, + }, + }, + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := checkAssociations(tt.args.b) + assert.Equal(t, tt.wantErr, len(got) > 0) + }) + } +} + +func Test_checkESRefsNamed(t *testing.T) { + type args struct { + b *Logstash + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "no ref: OK", + args: args{ + b: &Logstash{}, + }, + wantErr: false, + }, + { + name: "one ref, missing clusterName: NOK", + args: args{ + b: &Logstash{ + Spec: LogstashSpec{ + ElasticsearchRefs: []ElasticsearchCluster{ + { + ObjectSelector: commonv1.ObjectSelector{Name: "bla", Namespace: "blub"}, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "multiple refs, each with clusterName: OK", + args: args{ + b: &Logstash{ + Spec: LogstashSpec{ + ElasticsearchRefs: []ElasticsearchCluster{ + { + ObjectSelector: commonv1.ObjectSelector{Name: "bla", Namespace: "blub"}, + ClusterName: "bla", + }, + { + ObjectSelector: 
commonv1.ObjectSelector{Name: "bla", Namespace: "blub"}, + ClusterName: "blub", + }, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "multiple refs, one missing clusterName: NOK", + args: args{ + b: &Logstash{ + Spec: LogstashSpec{ + ElasticsearchRefs: []ElasticsearchCluster{ + { + ObjectSelector: commonv1.ObjectSelector{Name: "bla", Namespace: "blub"}, + ClusterName: "", + }, + { + ObjectSelector: commonv1.ObjectSelector{Name: "bla", Namespace: "blub"}, + ClusterName: "default", + }, + }, + }, + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := checkESRefsNamed(tt.args.b) + assert.Equal(t, tt.wantErr, len(got) > 0) + }) + } +} diff --git a/pkg/apis/logstash/v1alpha1/webhook.go b/pkg/apis/logstash/v1alpha1/webhook.go new file mode 100644 index 0000000000..e01092d610 --- /dev/null +++ b/pkg/apis/logstash/v1alpha1/webhook.go @@ -0,0 +1,84 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package v1alpha1 + +import ( + "errors" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "sigs.k8s.io/controller-runtime/pkg/webhook" + + ulog "github.com/elastic/cloud-on-k8s/v2/pkg/utils/log" +) + +const ( + // webhookPath is the HTTP path for the Elastic Logstash validating webhook. + webhookPath = "/validate-logstash-k8s-elastic-co-v1alpha1-logstash" +) + +var ( + groupKind = schema.GroupKind{Group: GroupVersion.Group, Kind: Kind} + validationLog = ulog.Log.WithName("logstash-v1alpha1-validation") +) + +// +kubebuilder:webhook:path=/validate-logstash-k8s-elastic-co-v1alpha1-logstash,mutating=false,failurePolicy=ignore,groups=logstash.k8s.elastic.co,resources=logstashes,verbs=create;update,versions=v1alpha1,name=elastic-logstash-validation-v1alpha1.k8s.elastic.co,sideEffects=None,admissionReviewVersions=v1;v1beta1,matchPolicy=Exact + +var _ webhook.Validator = &Logstash{} + +// ValidateCreate is called by the validating webhook to validate the create operation. +// Satisfies the webhook.Validator interface. +func (l *Logstash) ValidateCreate() error { + validationLog.V(1).Info("Validate create", "name", l.Name) + return l.validate(nil) +} + +// ValidateDelete is called by the validating webhook to validate the delete operation. +// Satisfies the webhook.Validator interface. +func (l *Logstash) ValidateDelete() error { + validationLog.V(1).Info("Validate delete", "name", l.Name) + return nil +} + +// ValidateUpdate is called by the validating webhook to validate the update operation. +// Satisfies the webhook.Validator interface. +func (l *Logstash) ValidateUpdate(old runtime.Object) error { + validationLog.V(1).Info("Validate update", "name", l.Name) + oldObj, ok := old.(*Logstash) + if !ok { + return errors.New("cannot cast old object to Logstash type") + } + + return l.validate(oldObj) +} + +// WebhookPath returns the HTTP path used by the validating webhook. +func (l *Logstash) WebhookPath() string { + return webhookPath +} + +func (l *Logstash) validate(old *Logstash) error { + var errors field.ErrorList + if old != nil { + for _, uc := range updateChecks { + if err := uc(old, l); err != nil { + errors = append(errors, err...) 
+ } + } + } + + for _, dc := range defaultChecks { + if err := dc(l); err != nil { + errors = append(errors, err...) + } + } + + if len(errors) > 0 { + return apierrors.NewInvalid(groupKind, l.Name, errors) + } + return nil +} diff --git a/pkg/apis/logstash/v1alpha1/webhook_test.go b/pkg/apis/logstash/v1alpha1/webhook_test.go new file mode 100644 index 0000000000..dc04599c9a --- /dev/null +++ b/pkg/apis/logstash/v1alpha1/webhook_test.go @@ -0,0 +1,124 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package v1alpha1_test + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" + admissionv1beta1 "k8s.io/api/admission/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/test" +) + +func TestWebhook(t *testing.T) { + testCases := []test.ValidationWebhookTestCase{ + { + Name: "simple-stackmon-ref", + Operation: admissionv1beta1.Create, + Object: func(t *testing.T, uid string) []byte { + t.Helper() + ls := mkLogstash(uid) + ls.Spec.Version = "8.7.0" + ls.Spec.Monitoring = commonv1.Monitoring{Metrics: commonv1.MetricsMonitoring{ElasticsearchRefs: []commonv1.ObjectSelector{{Name: "esmonname", Namespace: "esmonns"}}}} + return serialize(t, ls) + }, + Check: test.ValidationWebhookSucceeded, + }, + { + Name: "multiple-stackmon-ref", + Operation: admissionv1beta1.Create, + Object: func(t *testing.T, uid string) []byte { + t.Helper() + ls := mkLogstash(uid) + ls.Spec.Version = "8.7.0" + ls.Spec.Monitoring = commonv1.Monitoring{ + Metrics: commonv1.MetricsMonitoring{ElasticsearchRefs: []commonv1.ObjectSelector{{SecretName: "es1monname"}}}, + Logs: commonv1.LogsMonitoring{ElasticsearchRefs: []commonv1.ObjectSelector{{SecretName: "es2monname"}}}, + } + return serialize(t, ls) + }, + Check: test.ValidationWebhookSucceeded, + }, + { + Name: "invalid-version-for-stackmon", + Operation: admissionv1beta1.Create, + Object: func(t *testing.T, uid string) []byte { + t.Helper() + ls := mkLogstash(uid) + ls.Spec.Version = "7.13.0" + ls.Spec.Monitoring = commonv1.Monitoring{Metrics: commonv1.MetricsMonitoring{ElasticsearchRefs: []commonv1.ObjectSelector{{Name: "esmonname", Namespace: "esmonns"}}}} + return serialize(t, ls) + }, + Check: test.ValidationWebhookFailed( + `spec.version: Invalid value: "7.13.0": Unsupported version for Stack Monitoring. 
Required >= 8.7.0`, + ), + }, + { + Name: "invalid-stackmon-ref-with-name", + Operation: admissionv1beta1.Create, + Object: func(t *testing.T, uid string) []byte { + t.Helper() + ls := mkLogstash(uid) + ls.Spec.Version = "8.7.0" + ls.Spec.Monitoring = commonv1.Monitoring{ + Metrics: commonv1.MetricsMonitoring{ElasticsearchRefs: []commonv1.ObjectSelector{{SecretName: "es1monname", Name: "xx"}}}, + Logs: commonv1.LogsMonitoring{ElasticsearchRefs: []commonv1.ObjectSelector{{SecretName: "es2monname"}}}, + } + return serialize(t, ls) + }, + Check: test.ValidationWebhookFailed( + `spec.monitoring.metrics: Forbidden: Invalid association reference: specify name or secretName, not both`, + ), + }, + { + Name: "invalid-stackmon-ref-with-service-name", + Operation: admissionv1beta1.Create, + Object: func(t *testing.T, uid string) []byte { + t.Helper() + ls := mkLogstash(uid) + ls.Spec.Version = "8.7.0" + ls.Spec.Monitoring = commonv1.Monitoring{ + Metrics: commonv1.MetricsMonitoring{ElasticsearchRefs: []commonv1.ObjectSelector{{SecretName: "es1monname"}}}, + Logs: commonv1.LogsMonitoring{ElasticsearchRefs: []commonv1.ObjectSelector{{SecretName: "es2monname", ServiceName: "xx"}}}, + } + return serialize(t, ls) + }, + Check: test.ValidationWebhookFailed( + `spec.monitoring.logs: Forbidden: Invalid association reference: serviceName or namespace can only be used in combination with name, not with secretName`, + ), + }, + } + + validator := &v1alpha1.Logstash{} + gvk := metav1.GroupVersionKind{Group: v1alpha1.GroupVersion.Group, Version: v1alpha1.GroupVersion.Version, Kind: v1alpha1.Kind} + test.RunValidationWebhookTests(t, gvk, validator, testCases...) +} + +func mkLogstash(uid string) *v1alpha1.Logstash { + return &v1alpha1.Logstash{ + ObjectMeta: metav1.ObjectMeta{ + Name: "webhook-test", + UID: types.UID(uid), + }, + Spec: v1alpha1.LogstashSpec{ + Version: "8.6.0", + }, + } +} + +func serialize(t *testing.T, k *v1alpha1.Logstash) []byte { + t.Helper() + + objBytes, err := json.Marshal(k) + require.NoError(t, err) + + return objBytes +} diff --git a/pkg/apis/logstash/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/logstash/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..27ceaac3a2 --- /dev/null +++ b/pkg/apis/logstash/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,254 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchCluster) DeepCopyInto(out *ElasticsearchCluster) { + *out = *in + out.ObjectSelector = in.ObjectSelector +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchCluster. +func (in *ElasticsearchCluster) DeepCopy() *ElasticsearchCluster { + if in == nil { + return nil + } + out := new(ElasticsearchCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Logstash) DeepCopyInto(out *Logstash) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + if in.EsAssocConfs != nil { + in, out := &in.EsAssocConfs, &out.EsAssocConfs + *out = make(map[v1.ObjectSelector]v1.AssociationConf, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.MonitoringAssocConfs != nil { + in, out := &in.MonitoringAssocConfs, &out.MonitoringAssocConfs + *out = make(map[v1.ObjectSelector]v1.AssociationConf, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Logstash. +func (in *Logstash) DeepCopy() *Logstash { + if in == nil { + return nil + } + out := new(Logstash) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Logstash) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogstashESAssociation) DeepCopyInto(out *LogstashESAssociation) { + *out = *in + if in.Logstash != nil { + in, out := &in.Logstash, &out.Logstash + *out = new(Logstash) + (*in).DeepCopyInto(*out) + } + out.ElasticsearchCluster = in.ElasticsearchCluster +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogstashESAssociation. +func (in *LogstashESAssociation) DeepCopy() *LogstashESAssociation { + if in == nil { + return nil + } + out := new(LogstashESAssociation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogstashList) DeepCopyInto(out *LogstashList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Logstash, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogstashList. +func (in *LogstashList) DeepCopy() *LogstashList { + if in == nil { + return nil + } + out := new(LogstashList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LogstashList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogstashMonitoringAssociation) DeepCopyInto(out *LogstashMonitoringAssociation) { + *out = *in + if in.Logstash != nil { + in, out := &in.Logstash, &out.Logstash + *out = new(Logstash) + (*in).DeepCopyInto(*out) + } + out.ref = in.ref +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogstashMonitoringAssociation. +func (in *LogstashMonitoringAssociation) DeepCopy() *LogstashMonitoringAssociation { + if in == nil { + return nil + } + out := new(LogstashMonitoringAssociation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogstashService) DeepCopyInto(out *LogstashService) { + *out = *in + in.Service.DeepCopyInto(&out.Service) + in.TLS.DeepCopyInto(&out.TLS) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogstashService. +func (in *LogstashService) DeepCopy() *LogstashService { + if in == nil { + return nil + } + out := new(LogstashService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogstashSpec) DeepCopyInto(out *LogstashSpec) { + *out = *in + if in.ElasticsearchRefs != nil { + in, out := &in.ElasticsearchRefs, &out.ElasticsearchRefs + *out = make([]ElasticsearchCluster, len(*in)) + copy(*out, *in) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = (*in).DeepCopy() + } + if in.ConfigRef != nil { + in, out := &in.ConfigRef, &out.ConfigRef + *out = new(v1.ConfigSource) + **out = **in + } + if in.Pipelines != nil { + in, out := &in.Pipelines, &out.Pipelines + *out = make([]v1.Config, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PipelinesRef != nil { + in, out := &in.PipelinesRef, &out.PipelinesRef + *out = new(v1.ConfigSource) + **out = **in + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make([]LogstashService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Monitoring.DeepCopyInto(&out.Monitoring) + in.PodTemplate.DeepCopyInto(&out.PodTemplate) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.SecureSettings != nil { + in, out := &in.SecureSettings, &out.SecureSettings + *out = make([]v1.SecretSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogstashSpec. +func (in *LogstashSpec) DeepCopy() *LogstashSpec { + if in == nil { + return nil + } + out := new(LogstashSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogstashStatus) DeepCopyInto(out *LogstashStatus) { + *out = *in + if in.ElasticsearchAssociationsStatus != nil { + in, out := &in.ElasticsearchAssociationsStatus, &out.ElasticsearchAssociationsStatus + *out = make(v1.AssociationStatusMap, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.MonitoringAssociationStatus != nil { + in, out := &in.MonitoringAssociationStatus, &out.MonitoringAssociationStatus + *out = make(v1.AssociationStatusMap, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogstashStatus. 
+func (in *LogstashStatus) DeepCopy() *LogstashStatus { + if in == nil { + return nil + } + out := new(LogstashStatus) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/controller/agent/controller.go b/pkg/controller/agent/controller.go index 62ed7001ce..ab57ced8e3 100644 --- a/pkg/controller/agent/controller.go +++ b/pkg/controller/agent/controller.go @@ -154,7 +154,7 @@ func (r *ReconcileAgent) Reconcile(ctx context.Context, request reconcile.Reques } result, err := results.Aggregate() - k8s.EmitErrorEvent(r.recorder, err, agent, events.EventReconciliationError, "Reconciliation error: %v", err) + k8s.MaybeEmitErrorEvent(r.recorder, err, agent, events.EventReconciliationError, "Reconciliation error: %v", err) return result, err } @@ -195,7 +195,7 @@ func (r *ReconcileAgent) validate(ctx context.Context, agent agentv1alpha1.Agent // Run create validations only as update validations require old object which we don't have here. if err := agent.ValidateCreate(); err != nil { logconf.FromContext(ctx).Error(err, "Validation failed") - k8s.EmitErrorEvent(r.recorder, err, &agent, events.EventReasonValidation, err.Error()) + k8s.MaybeEmitErrorEvent(r.recorder, err, &agent, events.EventReasonValidation, err.Error()) return tracing.CaptureError(ctx, err) } return nil diff --git a/pkg/controller/apmserver/controller.go b/pkg/controller/apmserver/controller.go index b1a43595aa..ea35db0df6 100644 --- a/pkg/controller/apmserver/controller.go +++ b/pkg/controller/apmserver/controller.go @@ -245,7 +245,7 @@ func (r *ReconcileApmServer) doReconcile(ctx context.Context, as *apmv1.ApmServe }.ReconcileCAAndHTTPCerts(ctx) if results.HasError() { _, err := results.Aggregate() - k8s.EmitErrorEvent(r.recorder, err, as, events.EventReconciliationError, "Certificate reconciliation error: %v", err) + k8s.MaybeEmitErrorEvent(r.recorder, err, as, events.EventReconciliationError, "Certificate reconciliation error: %v", err) return results, state } @@ -268,14 +268,14 @@ func (r *ReconcileApmServer) doReconcile(ctx context.Context, as *apmv1.ApmServe log.V(1).Info("Conflict while updating status") return results.WithResult(reconcile.Result{Requeue: true}), state } - k8s.EmitErrorEvent(r.recorder, err, as, events.EventReconciliationError, "Deployment reconciliation error: %v", err) + k8s.MaybeEmitErrorEvent(r.recorder, err, as, events.EventReconciliationError, "Deployment reconciliation error: %v", err) return results.WithError(tracing.CaptureError(ctx, err)), state } state.UpdateApmServerExternalService(*svc) _, err = results.WithError(err).Aggregate() - k8s.EmitErrorEvent(r.recorder, err, as, events.EventReconciliationError, "Reconciliation error: %v", err) + k8s.MaybeEmitErrorEvent(r.recorder, err, as, events.EventReconciliationError, "Reconciliation error: %v", err) return results, state } @@ -285,7 +285,7 @@ func (r *ReconcileApmServer) validate(ctx context.Context, as *apmv1.ApmServer) if err := as.ValidateCreate(); err != nil { log.Error(err, "Validation failed") - k8s.EmitErrorEvent(r.recorder, err, as, events.EventReasonValidation, err.Error()) + k8s.MaybeEmitErrorEvent(r.recorder, err, as, events.EventReasonValidation, err.Error()) return tracing.CaptureError(vctx, err) } diff --git a/pkg/controller/association/controller/logstash_es.go b/pkg/controller/association/controller/logstash_es.go new file mode 100644 index 0000000000..323f44d926 --- /dev/null +++ b/pkg/controller/association/controller/logstash_es.go @@ -0,0 +1,66 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package controller + +import ( + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/association" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/operator" + eslabel "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/label" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/user" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/rbac" +) + +const ( + // LogstashAssociationLabelName marks resources created for an association originating from Logstash with the + // Logstash name. + LogstashAssociationLabelName = "logstashassociation.k8s.elastic.co/name" + // LogstashAssociationLabelNamespace marks resources created for an association originating from Logstash with the + // Logstash namespace. + LogstashAssociationLabelNamespace = "logstashassociation.k8s.elastic.co/namespace" + // LogstashAssociationLabelType marks resources created for an association originating from Logstash + // with the target resource type (e.g. "elasticsearch"). + LogstashAssociationLabelType = "logstashassociation.k8s.elastic.co/type" +) + +func AddLogstashES(mgr manager.Manager, accessReviewer rbac.AccessReviewer, params operator.Parameters) error { + return association.AddAssociationController(mgr, accessReviewer, params, association.AssociationInfo{ + AssociationType: commonv1.ElasticsearchAssociationType, + AssociatedObjTemplate: func() commonv1.Associated { return &logstashv1alpha1.Logstash{} }, + ReferencedObjTemplate: func() client.Object { return &esv1.Elasticsearch{} }, + ReferencedResourceVersion: referencedElasticsearchStatusVersion, + ExternalServiceURL: getElasticsearchExternalURL, + ReferencedResourceNamer: esv1.ESNamer, + AssociationName: "logstash-es", + AssociatedShortName: "logstash", + Labels: func(associated types.NamespacedName) map[string]string { + return map[string]string{ + LogstashAssociationLabelName: associated.Name, + LogstashAssociationLabelNamespace: associated.Namespace, + LogstashAssociationLabelType: commonv1.ElasticsearchAssociationType, + } + }, + AssociationConfAnnotationNameBase: commonv1.ElasticsearchConfigAnnotationNameBase, + AssociationResourceNameLabelName: eslabel.ClusterNameLabelName, + AssociationResourceNamespaceLabelName: eslabel.ClusterNamespaceLabelName, + + ElasticsearchUserCreation: &association.ElasticsearchUserCreation{ + ElasticsearchRef: func(c k8s.Client, association commonv1.Association) (bool, commonv1.ObjectSelector, error) { + return true, association.AssociationRef(), nil + }, + UserSecretSuffix: "logstash-user", + ESUserRole: func(associated commonv1.Associated) (string, error) { + return user.LogstashUserRole, nil + }, + }, + }) +} diff --git a/pkg/controller/association/controller/logstash_monitoring.go b/pkg/controller/association/controller/logstash_monitoring.go new file mode 100644 index 0000000000..c97398d3c1 --- /dev/null +++ b/pkg/controller/association/controller/logstash_monitoring.go @@ -0,0 +1,57 @@ +// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package controller + +import ( + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/association" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/operator" + eslabel "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/label" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/user" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/rbac" +) + +// AddLogstashMonitoring reconciles an association between Logstash and Elasticsearch clusters for Stack Monitoring. +// Beats are configured to collect monitoring metrics and logs data of the associated Logstash and send +// them to the Elasticsearch referenced in the association. +func AddLogstashMonitoring(mgr manager.Manager, accessReviewer rbac.AccessReviewer, params operator.Parameters) error { + return association.AddAssociationController(mgr, accessReviewer, params, association.AssociationInfo{ + AssociatedObjTemplate: func() commonv1.Associated { return &logstashv1alpha1.Logstash{} }, + ReferencedObjTemplate: func() client.Object { return &esv1.Elasticsearch{} }, + ReferencedResourceVersion: referencedElasticsearchStatusVersion, + ExternalServiceURL: getElasticsearchExternalURL, + AssociationType: commonv1.LogstashMonitoringAssociationType, + ReferencedResourceNamer: esv1.ESNamer, + AssociationName: "ls-monitoring", + AssociatedShortName: "ls-mon", + Labels: func(associated types.NamespacedName) map[string]string { + return map[string]string{ + LogstashAssociationLabelName: associated.Name, + LogstashAssociationLabelNamespace: associated.Namespace, + LogstashAssociationLabelType: commonv1.LogstashMonitoringAssociationType, + } + }, + AssociationConfAnnotationNameBase: commonv1.ElasticsearchConfigAnnotationNameBase, + AssociationResourceNameLabelName: eslabel.ClusterNameLabelName, + AssociationResourceNamespaceLabelName: eslabel.ClusterNamespaceLabelName, + + ElasticsearchUserCreation: &association.ElasticsearchUserCreation{ + ElasticsearchRef: func(c k8s.Client, association commonv1.Association) (bool, commonv1.ObjectSelector, error) { + return true, association.AssociationRef(), nil + }, + UserSecretSuffix: "beat-ls-mon-user", + ESUserRole: func(associated commonv1.Associated) (string, error) { + return user.StackMonitoringUserRole, nil + }, + }, + }) +} diff --git a/pkg/controller/association/reconciler.go b/pkg/controller/association/reconciler.go index 1b91282773..c0801453f8 100644 --- a/pkg/controller/association/reconciler.go +++ b/pkg/controller/association/reconciler.go @@ -402,7 +402,7 @@ func (r *Reconciler) getElasticsearch( var es esv1.Elasticsearch err := r.Get(ctx, elasticsearchRef.NamespacedName(), &es) if err != nil { - k8s.EmitErrorEvent(r.recorder, err, association, events.EventAssociationError, + k8s.MaybeEmitErrorEvent(r.recorder, err, association, events.EventAssociationError, "Failed to find referenced backend %s: %v", elasticsearchRef.NamespacedName(), err) if 
apierrors.IsNotFound(err) { // ES is not found, remove any existing backend configuration and retry in a bit. diff --git a/pkg/controller/beat/controller.go b/pkg/controller/beat/controller.go index f7aef5f04f..e1b0ebcdde 100644 --- a/pkg/controller/beat/controller.go +++ b/pkg/controller/beat/controller.go @@ -160,7 +160,7 @@ func (r *ReconcileBeat) Reconcile(ctx context.Context, request reconcile.Request } res, err := results.Aggregate() - k8s.EmitErrorEvent(r.recorder, err, &beat, events.EventReconciliationError, "Reconciliation error: %v", err) + k8s.MaybeEmitErrorEvent(r.recorder, err, &beat, events.EventReconciliationError, "Reconciliation error: %v", err) return res, err } @@ -192,7 +192,7 @@ func (r *ReconcileBeat) validate(ctx context.Context, beat *beatv1beta1.Beat) er if err := beat.ValidateCreate(); err != nil { ulog.FromContext(ctx).Error(err, "Validation failed") - k8s.EmitErrorEvent(r.recorder, err, beat, events.EventReasonValidation, err.Error()) + k8s.MaybeEmitErrorEvent(r.recorder, err, beat, events.EventReasonValidation, err.Error()) return tracing.CaptureError(vctx, err) } diff --git a/pkg/controller/common/configref.go b/pkg/controller/common/configref.go index 0f100206b8..89961372b8 100644 --- a/pkg/controller/common/configref.go +++ b/pkg/controller/common/configref.go @@ -8,6 +8,9 @@ import ( "context" "fmt" + "github.com/elastic/go-ucfg" + uyaml "github.com/elastic/go-ucfg/yaml" + "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" @@ -34,6 +37,23 @@ func ParseConfigRef( configRef *commonv1.ConfigSource, secretKey string, // retrieve config data from that entry in the secret ) (*settings.CanonicalConfig, error) { + parsed, err := ParseConfigRefToConfig(driver, resource, configRef, secretKey, ConfigRefWatchName, settings.Options) + if err != nil { + return nil, err + } + return (*settings.CanonicalConfig)(parsed), nil +} + +// ParseConfigRefToConfig retrieves the content of a secret referenced in `configRef`, sets up dynamic watches for that secret, +// and parses the secret content into ucfg.Config. +func ParseConfigRefToConfig( + driver driver.Interface, + resource runtime.Object, // eg. Beat, EnterpriseSearch + configRef *commonv1.ConfigSource, + secretKey string, // retrieve config data from that entry in the secret + configRefWatchName func(types.NamespacedName) string, + configOptions []ucfg.Option, +) (*ucfg.Config, error) { resourceMeta, err := meta.Accessor(resource) if err != nil { return nil, err @@ -46,7 +66,7 @@ func ParseConfigRef( if configRef != nil && configRef.SecretName != "" { secretNames = append(secretNames, configRef.SecretName) } - if err := watches.WatchUserProvidedSecrets(resourceNsn, driver.DynamicWatches(), ConfigRefWatchName(resourceNsn), secretNames); err != nil { + if err := watches.WatchUserProvidedSecrets(resourceNsn, driver.DynamicWatches(), configRefWatchName(resourceNsn), secretNames); err != nil { return nil, err } @@ -66,7 +86,9 @@ func ParseConfigRef( driver.Recorder().Event(resource, corev1.EventTypeWarning, events.EventReasonUnexpected, msg) return nil, errors.New(msg) } - parsed, err := settings.ParseConfig(data) + + parsed, err := uyaml.NewConfig(data, configOptions...) 
+ if err != nil { msg := fmt.Sprintf("unable to parse %s in configRef secret %s/%s", secretKey, namespace, configRef.SecretName) driver.Recorder().Event(resource, corev1.EventTypeWarning, events.EventReasonUnexpected, msg) diff --git a/pkg/controller/common/container/container.go b/pkg/controller/common/container/container.go index 1730738122..5056dcc4cc 100644 --- a/pkg/controller/common/container/container.go +++ b/pkg/controller/common/container/container.go @@ -40,6 +40,7 @@ const ( PacketbeatImage Image = "beats/packetbeat" AgentImage Image = "beats/elastic-agent" MapsImage Image = "elastic-maps-service/elastic-maps-server-ubi8" + LogstashImage Image = "logstash/logstash" ) // ImageRepository returns the full container image name by concatenating the current container registry and the image path with the given version. diff --git a/pkg/controller/common/reconciler/secret.go b/pkg/controller/common/reconciler/secret.go index 0b6026f87a..d141d86a83 100644 --- a/pkg/controller/common/reconciler/secret.go +++ b/pkg/controller/common/reconciler/secret.go @@ -30,11 +30,18 @@ const ( SoftOwnerKindLabel = "eck.k8s.elastic.co/owner-kind" ) +func WithPostUpdate(f func()) func(p *Params) { + return func(p *Params) { + p.PostUpdate = f + } +} + // ReconcileSecret creates or updates the actual secret to match the expected one. // Existing annotations or labels that are not expected are preserved. -func ReconcileSecret(ctx context.Context, c k8s.Client, expected corev1.Secret, owner client.Object) (corev1.Secret, error) { +func ReconcileSecret(ctx context.Context, c k8s.Client, expected corev1.Secret, owner client.Object, opts ...func(*Params)) (corev1.Secret, error) { var reconciled corev1.Secret - if err := ReconcileResource(Params{ + + params := Params{ Context: ctx, Client: c, Owner: owner, @@ -54,7 +61,11 @@ func ReconcileSecret(ctx context.Context, c k8s.Client, expected corev1.Secret, reconciled.Annotations = maps.Merge(reconciled.Annotations, expected.Annotations) reconciled.Data = expected.Data }, - }); err != nil { + } + for _, opt := range opts { + opt(¶ms) + } + if err := ReconcileResource(params); err != nil { return corev1.Secret{}, err } return reconciled, nil diff --git a/pkg/controller/common/scheme/scheme.go b/pkg/controller/common/scheme/scheme.go index fb51fe2d66..51ce53cdd3 100644 --- a/pkg/controller/common/scheme/scheme.go +++ b/pkg/controller/common/scheme/scheme.go @@ -7,6 +7,8 @@ package scheme import ( "sync" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" @@ -55,6 +57,7 @@ func SetupScheme() { agentv1alpha1.AddToScheme, emsv1alpha1.AddToScheme, policyv1alpha1.AddToScheme, + logstashv1alpha1.AddToScheme, } mustAddSchemeOnce(&addToScheme, schemes) } @@ -72,6 +75,7 @@ func SetupV1beta1Scheme() { entv1beta1.AddToScheme, beatv1beta1.AddToScheme, agentv1alpha1.AddToScheme, + logstashv1alpha1.AddToScheme, } mustAddSchemeOnce(&addToSchemeV1beta1, schemes) } diff --git a/pkg/controller/common/stackmon/validations/validations.go b/pkg/controller/common/stackmon/validations/validations.go index 45a435ebe8..c83c2a865e 100644 --- a/pkg/controller/common/stackmon/validations/validations.go +++ b/pkg/controller/common/stackmon/validations/validations.go @@ -31,12 +31,12 @@ var ( // Validate validates that the resource version is supported for Stack Monitoring and that there is exactly one // Elasticsearch reference defined to send monitoring data when Stack 
Monitoring is defined -func Validate(resource monitoring.HasMonitoring, version string) field.ErrorList { +func Validate(resource monitoring.HasMonitoring, version string, minVersion version.Version) field.ErrorList { var errs field.ErrorList if monitoring.IsDefined(resource) { - err := IsSupportedVersion(version) + err := IsSupportedVersion(version, minVersion) if err != nil { - finalMinStackVersion, _ := semver.FinalizeVersion(MinStackVersion.String()) // discards prerelease suffix + finalMinStackVersion, _ := semver.FinalizeVersion(minVersion.String()) // discards prerelease suffix errs = append(errs, field.Invalid(field.NewPath("spec").Child("version"), version, fmt.Sprintf(UnsupportedVersionMsg, finalMinStackVersion))) } @@ -54,14 +54,14 @@ func Validate(resource monitoring.HasMonitoring, version string) field.ErrorList return errs } -// IsSupportedVersion returns true if the resource version is supported for Stack Monitoring, else returns false -func IsSupportedVersion(v string) error { +// IsSupportedVersion returns error if the resource version is not supported for Stack Monitoring +func IsSupportedVersion(v string, minVersion version.Version) error { ver, err := version.Parse(v) if err != nil { return err } - if ver.LT(MinStackVersion) { - return fmt.Errorf("unsupported version for Stack Monitoring: required >= %s", MinStackVersion) + if ver.LT(minVersion) { + return fmt.Errorf("unsupported version for Stack Monitoring: required >= %s", minVersion) } return nil } diff --git a/pkg/controller/common/stackmon/validations/validations_test.go b/pkg/controller/common/stackmon/validations/validations_test.go index 0679d08b0c..e3e2c93b81 100644 --- a/pkg/controller/common/stackmon/validations/validations_test.go +++ b/pkg/controller/common/stackmon/validations/validations_test.go @@ -97,7 +97,7 @@ func TestValidate(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - err := Validate(&tc.es, tc.es.Spec.Version) + err := Validate(&tc.es, tc.es.Spec.Version, MinStackVersion) if len(err) > 0 { require.True(t, tc.isErr) } else { diff --git a/pkg/controller/common/version/version.go b/pkg/controller/common/version/version.go index 0b9e379a4f..0959920647 100644 --- a/pkg/controller/common/version/version.go +++ b/pkg/controller/common/version/version.go @@ -33,6 +33,7 @@ var ( // Due to bugfixes present in 7.14 that ECK depends on, this is the lowest version we support in Fleet mode. SupportedFleetModeAgentVersions = MinMaxVersion{Min: MustParse("7.14.0-SNAPSHOT"), Max: From(8, 99, 99)} SupportedMapsVersions = MinMaxVersion{Min: From(7, 11, 0), Max: From(8, 99, 99)} + SupportedLogstashVersions = MinMaxVersion{Min: From(8, 6, 0), Max: From(8, 99, 99)} // minPreReleaseVersion is the lowest prerelease identifier as numeric prerelease takes precedence before // alphanumeric ones and it can't have leading zeros. 
diff --git a/pkg/controller/elasticsearch/certificates/reconcile.go b/pkg/controller/elasticsearch/certificates/reconcile.go index b28baa419a..2d23a02835 100644 --- a/pkg/controller/elasticsearch/certificates/reconcile.go +++ b/pkg/controller/elasticsearch/certificates/reconcile.go @@ -71,7 +71,7 @@ func ReconcileHTTP( }.ReconcileCAAndHTTPCerts(ctx) if results.HasError() { _, err := results.Aggregate() - k8s.EmitErrorEvent(driver.Recorder(), err, &es, events.EventReconciliationError, "Certificate reconciliation error: %v", err) + k8s.MaybeEmitErrorEvent(driver.Recorder(), err, &es, events.EventReconciliationError, "Certificate reconciliation error: %v", err) return nil, results } diff --git a/pkg/controller/elasticsearch/elasticsearch_controller.go b/pkg/controller/elasticsearch/elasticsearch_controller.go index 68b998f552..40fe6dbd3d 100644 --- a/pkg/controller/elasticsearch/elasticsearch_controller.go +++ b/pkg/controller/elasticsearch/elasticsearch_controller.go @@ -194,7 +194,7 @@ func (r *ReconcileElasticsearch) Reconcile(ctx context.Context, request reconcil } else { log.Error(err, "Error while updating annotations", "namespace", es.Namespace, "es_name", es.Name) results.WithError(err) - k8s.EmitErrorEvent(r.recorder, err, &es, events.EventReconciliationError, "Reconciliation error: %v", err) + k8s.MaybeEmitErrorEvent(r.recorder, err, &es, events.EventReconciliationError, "Reconciliation error: %v", err) } } @@ -212,7 +212,7 @@ func (r *ReconcileElasticsearch) Reconcile(ctx context.Context, request reconcil log.V(1).Info("Conflict while updating status", "namespace", es.Namespace, "es_name", es.Name) return reconcile.Result{Requeue: true}, nil } - k8s.EmitErrorEvent(r.recorder, err, &es, events.EventReconciliationError, "Reconciliation error: %v", err) + k8s.MaybeEmitErrorEvent(r.recorder, err, &es, events.EventReconciliationError, "Reconciliation error: %v", err) } return results.WithError(err).Aggregate() } diff --git a/pkg/controller/elasticsearch/user/reconcile_test.go b/pkg/controller/elasticsearch/user/reconcile_test.go index ec103218cd..50183d4a67 100644 --- a/pkg/controller/elasticsearch/user/reconcile_test.go +++ b/pkg/controller/elasticsearch/user/reconcile_test.go @@ -96,6 +96,6 @@ func Test_aggregateRoles(t *testing.T) { c := k8s.NewFakeClient(sampleUserProvidedRolesSecret...) 
roles, err := aggregateRoles(context.Background(), c, sampleEsWithAuth, initDynamicWatches(), record.NewFakeRecorder(10)) require.NoError(t, err) - require.Len(t, roles, 54) + require.Len(t, roles, 55) require.Contains(t, roles, ProbeUserRole, ClusterManageRole, "role1", "role2") } diff --git a/pkg/controller/elasticsearch/user/roles.go b/pkg/controller/elasticsearch/user/roles.go index 4a26c9f26d..e84584d7b8 100644 --- a/pkg/controller/elasticsearch/user/roles.go +++ b/pkg/controller/elasticsearch/user/roles.go @@ -46,6 +46,8 @@ const ( FleetAdminUserRole = "eck_fleet_admin_user_role" + LogstashUserRole = "eck_logstash_user_role" + // V70 indicates version 7.0 V70 = "v70" @@ -173,6 +175,22 @@ var ( }, }, }, + LogstashUserRole: esclient.Role{ + Cluster: []string{ + "monitor", + "manage_ilm", + "read_ilm", + "manage_logstash_pipelines", + "manage_index_templates", + "cluster:admin/ingest/pipeline/get", + }, + Indices: []esclient.IndexRole{ + { + Names: []string{"logstash", "logstash-*", "ecs-logstash", "ecs-logstash-*", "logs-*", "metrics-*", "synthetics-*", "traces-*"}, + Privileges: []string{"manage", "write", "create_index", "read", "view_index_metadata"}, + }, + }, + }, } ) diff --git a/pkg/controller/elasticsearch/validation/validations.go b/pkg/controller/elasticsearch/validation/validations.go index cccfbb30ba..570e5ac387 100644 --- a/pkg/controller/elasticsearch/validation/validations.go +++ b/pkg/controller/elasticsearch/validation/validations.go @@ -330,7 +330,7 @@ func currentVersion(current esv1.Elasticsearch) (version.Version, *field.Error) } func validMonitoring(es esv1.Elasticsearch) field.ErrorList { - return stackmon.Validate(&es, es.Spec.Version) + return stackmon.Validate(&es, es.Spec.Version, stackmon.MinStackVersion) } func validAssociations(es esv1.Elasticsearch) field.ErrorList { diff --git a/pkg/controller/enterprisesearch/enterprisesearch_controller.go b/pkg/controller/enterprisesearch/enterprisesearch_controller.go index d343d41539..b9a8557f22 100644 --- a/pkg/controller/enterprisesearch/enterprisesearch_controller.go +++ b/pkg/controller/enterprisesearch/enterprisesearch_controller.go @@ -215,7 +215,7 @@ func (r *ReconcileEnterpriseSearch) doReconcile(ctx context.Context, ent entv1.E }.ReconcileCAAndHTTPCerts(ctx) if results.HasError() { _, err := results.Aggregate() - k8s.EmitErrorEvent(r.recorder, err, &ent, events.EventReconciliationError, "Certificate reconciliation error: %v", err) + k8s.MaybeEmitErrorEvent(r.recorder, err, &ent, events.EventReconciliationError, "Certificate reconciliation error: %v", err) return results, status } @@ -275,7 +275,7 @@ func (r *ReconcileEnterpriseSearch) validate(ctx context.Context, ent *entv1.Ent if err := ent.ValidateCreate(); err != nil { ulog.FromContext(ctx).Error(err, "Validation failed") - k8s.EmitErrorEvent(r.recorder, err, ent, events.EventReasonValidation, err.Error()) + k8s.MaybeEmitErrorEvent(r.recorder, err, ent, events.EventReasonValidation, err.Error()) return tracing.CaptureError(vctx, err) } diff --git a/pkg/controller/kibana/controller.go b/pkg/controller/kibana/controller.go index 47f722df4d..83b0fdb6d0 100644 --- a/pkg/controller/kibana/controller.go +++ b/pkg/controller/kibana/controller.go @@ -195,7 +195,7 @@ func (r *ReconcileKibana) doReconcile(ctx context.Context, request reconcile.Req results := driver.Reconcile(ctx, &state, kb, r.params) result, err = results.WithError(err).Aggregate() - k8s.EmitErrorEvent(r.recorder, err, kb, events.EventReconciliationError, "Reconciliation error: %v", err) + 
k8s.MaybeEmitErrorEvent(r.recorder, err, kb, events.EventReconciliationError, "Reconciliation error: %v", err) return result, err } @@ -205,7 +205,7 @@ func (r *ReconcileKibana) validate(ctx context.Context, kb *kbv1.Kibana) error { if err := kb.ValidateCreate(); err != nil { ulog.FromContext(ctx).Error(err, "Validation failed") - k8s.EmitErrorEvent(r.recorder, err, kb, events.EventReasonValidation, err.Error()) + k8s.MaybeEmitErrorEvent(r.recorder, err, kb, events.EventReasonValidation, err.Error()) return tracing.CaptureError(vctx, err) } diff --git a/pkg/controller/kibana/driver.go b/pkg/controller/kibana/driver.go index 1337139213..32f374941e 100644 --- a/pkg/controller/kibana/driver.go +++ b/pkg/controller/kibana/driver.go @@ -73,13 +73,13 @@ func newDriver( ) (*driver, error) { ver, err := version.Parse(kb.Spec.Version) if err != nil { - k8s.EmitErrorEvent(recorder, err, kb, events.EventReasonValidation, "Invalid version '%s': %v", kb.Spec.Version, err) + k8s.MaybeEmitErrorEvent(recorder, err, kb, events.EventReasonValidation, "Invalid version '%s': %v", kb.Spec.Version, err) return nil, err } if !ver.GTE(minSupportedVersion) { err := pkgerrors.Errorf("unsupported Kibana version: %s", ver) - k8s.EmitErrorEvent(recorder, err, kb, events.EventReasonValidation, "Unsupported Kibana version") + k8s.MaybeEmitErrorEvent(recorder, err, kb, events.EventReasonValidation, "Unsupported Kibana version") return nil, err } @@ -135,7 +135,7 @@ func (d *driver) Reconcile( }.ReconcileCAAndHTTPCerts(ctx) if results.HasError() { _, err := results.Aggregate() - k8s.EmitErrorEvent(d.Recorder(), err, kb, events.EventReconciliationError, "Certificate reconciliation error: %v", err) + k8s.MaybeEmitErrorEvent(d.Recorder(), err, kb, events.EventReconciliationError, "Certificate reconciliation error: %v", err) return results } diff --git a/pkg/controller/logstash/config.go b/pkg/controller/logstash/config.go new file mode 100644 index 0000000000..e29d213ff0 --- /dev/null +++ b/pkg/controller/logstash/config.go @@ -0,0 +1,83 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package logstash + +import ( + "hash" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/labels" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/reconciler" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/settings" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/tracing" +) + +func reconcileConfig(params Params, configHash hash.Hash) error { + defer tracing.Span(¶ms.Context)() + + cfgBytes, err := buildConfig(params) + if err != nil { + return err + } + + expected := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: params.Logstash.Namespace, + Name: logstashv1alpha1.ConfigSecretName(params.Logstash.Name), + Labels: labels.AddCredentialsLabel(params.Logstash.GetIdentityLabels()), + }, + Data: map[string][]byte{ + LogstashConfigFileName: cfgBytes, + }, + } + + if _, err = reconciler.ReconcileSecret(params.Context, params.Client, expected, ¶ms.Logstash); err != nil { + return err + } + + _, _ = configHash.Write(cfgBytes) + + return nil +} + +func buildConfig(params Params) ([]byte, error) { + userProvidedCfg, err := getUserConfig(params) + if err != nil { + return nil, err + } + + cfg := defaultConfig() + + // merge with user settings last so they take precedence + if err := cfg.MergeWith(userProvidedCfg); err != nil { + return nil, err + } + + return cfg.Render() +} + +// getUserConfig extracts the config either from the spec `config` field or from the Secret referenced by spec +// `configRef` field. +func getUserConfig(params Params) (*settings.CanonicalConfig, error) { + if params.Logstash.Spec.Config != nil { + return settings.NewCanonicalConfigFrom(params.Logstash.Spec.Config.Data) + } + return common.ParseConfigRef(params, ¶ms.Logstash, params.Logstash.Spec.ConfigRef, LogstashConfigFileName) +} + +func defaultConfig() *settings.CanonicalConfig { + settingsMap := map[string]interface{}{ + // Set 'api.http.host' by default to `0.0.0.0` for readiness probe to work. + "api.http.host": "0.0.0.0", + // Set `config.reload.automatic` to `true` to enable pipeline reloads by default + "config.reload.automatic": true, + } + + return settings.MustCanonicalConfig(settingsMap) +} diff --git a/pkg/controller/logstash/config_test.go b/pkg/controller/logstash/config_test.go new file mode 100644 index 0000000000..1fc33456f4 --- /dev/null +++ b/pkg/controller/logstash/config_test.go @@ -0,0 +1,162 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package logstash + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/watches" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" +) + +func Test_newConfig(t *testing.T) { + type args struct { + runtimeObjs []runtime.Object + logstash v1alpha1.Logstash + } + tests := []struct { + name string + args args + want string + wantErr bool + }{ + { + name: "no user config", + args: args{ + runtimeObjs: nil, + logstash: v1alpha1.Logstash{}, + }, + want: `api: + http: + host: 0.0.0.0 +config: + reload: + automatic: true +`, + wantErr: false, + }, + { + name: "inline user config", + args: args{ + runtimeObjs: nil, + logstash: v1alpha1.Logstash{ + Spec: v1alpha1.LogstashSpec{Config: &commonv1.Config{Data: map[string]interface{}{ + "log.level": "debug", + }}}, + }, + }, + want: `api: + http: + host: 0.0.0.0 +config: + reload: + automatic: true +log: + level: debug +`, + wantErr: false, + }, + { + name: "with configRef", + args: args{ + runtimeObjs: []runtime.Object{secretWithConfig("cfg", []byte("log.level: debug"))}, + logstash: logstashWithConfigRef("cfg", nil), + }, + want: `api: + http: + host: 0.0.0.0 +config: + reload: + automatic: true +log: + level: debug +`, + wantErr: false, + }, + { + name: "config takes precedence", + args: args{ + runtimeObjs: []runtime.Object{secretWithConfig("cfg", []byte("log.level: debug"))}, + logstash: logstashWithConfigRef("cfg", &commonv1.Config{Data: map[string]interface{}{ + "log.level": "warn", + }}), + }, + want: `api: + http: + host: 0.0.0.0 +config: + reload: + automatic: true +log: + level: warn +`, + wantErr: false, + }, + { + name: "non existing configRef", + args: args{ + logstash: logstashWithConfigRef("cfg", nil), + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + params := Params{ + Context: context.Background(), + Client: k8s.NewFakeClient(tt.args.runtimeObjs...), + EventRecorder: record.NewFakeRecorder(10), + Watches: watches.NewDynamicWatches(), + Logstash: tt.args.logstash, + } + + got, err := buildConfig(params) + if (err != nil) != tt.wantErr { + t.Errorf("newConfig() error = %v, wantErr %v", err, tt.wantErr) + return + } + if tt.wantErr { + return // no point in checking the config contents + } + require.NoError(t, err) + if string(got) != tt.want { + t.Errorf("newConfig() got = \n%v\n, want \n%v\n", string(got), tt.want) + } + }) + } +} + +func secretWithConfig(name string, cfg []byte) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: name, + }, + Data: map[string][]byte{ + LogstashConfigFileName: cfg, + }, + } +} + +func logstashWithConfigRef(name string, cfg *commonv1.Config) v1alpha1.Logstash { + return v1alpha1.Logstash{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ls", + Namespace: "ns", + }, + Spec: v1alpha1.LogstashSpec{ + Config: cfg, + ConfigRef: &commonv1.ConfigSource{SecretRef: commonv1.SecretRef{SecretName: name}}}, + } +} diff --git a/pkg/controller/logstash/driver.go b/pkg/controller/logstash/driver.go new file mode 100644 index 0000000000..7face4fcdc --- /dev/null +++ b/pkg/controller/logstash/driver.go @@ -0,0 +1,103 @@ +// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package logstash + +import ( + "context" + + "hash/fnv" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/tools/record" + + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/operator" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/reconciler" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/tracing" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/watches" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/logstash/stackmon" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/log" +) + +// Params are a set of parameters used during internal reconciliation of Logstash. +type Params struct { + Context context.Context + + Client k8s.Client + EventRecorder record.EventRecorder + Watches watches.DynamicWatches + + Logstash logstashv1alpha1.Logstash + Status logstashv1alpha1.LogstashStatus + + OperatorParams operator.Parameters +} + +// K8sClient returns the Kubernetes client. +func (p Params) K8sClient() k8s.Client { + return p.Client +} + +// Recorder returns the Kubernetes event recorder. +func (p Params) Recorder() record.EventRecorder { + return p.EventRecorder +} + +// DynamicWatches returns the set of stateful dynamic watches used during reconciliation. +func (p Params) DynamicWatches() watches.DynamicWatches { + return p.Watches +} + +// GetPodTemplate returns the configured pod template for the associated Elastic Logstash. +func (p *Params) GetPodTemplate() corev1.PodTemplateSpec { + return p.Logstash.Spec.PodTemplate +} + +// Logger returns the configured logger for use during reconciliation. +func (p *Params) Logger() logr.Logger { + return log.FromContext(p.Context) +} + +func newStatus(logstash logstashv1alpha1.Logstash) logstashv1alpha1.LogstashStatus { + status := logstash.Status + status.ObservedGeneration = logstash.Generation + return status +} + +func internalReconcile(params Params) (*reconciler.Results, logstashv1alpha1.LogstashStatus) { + defer tracing.Span(¶ms.Context)() + results := reconciler.NewResult(params.Context) + + _, err := reconcileServices(params) + if err != nil { + return results.WithError(err), params.Status + } + + configHash := fnv.New32a() + + // reconcile beats config secrets if Stack Monitoring is defined + if err := stackmon.ReconcileConfigSecrets(params.Context, params.Client, params.Logstash); err != nil { + return results.WithError(err), params.Status + } + + if err := reconcileConfig(params, configHash); err != nil { + return results.WithError(err), params.Status + } + + // We intentionally DO NOT pass the configHash here. 
We don't want to consider the pipeline definitions in the + // hash of the config to ensure that a pipeline change does not automatically trigger a restart + // of the pod, but allows Logstash's automatic reload of pipelines to take place + if err := reconcilePipeline(params); err != nil { + return results.WithError(err), params.Status + } + + podTemplate, err := buildPodTemplate(params, configHash) + if err != nil { + return results.WithError(err), params.Status + } + return reconcileStatefulSet(params, podTemplate) +} diff --git a/pkg/controller/logstash/env.go b/pkg/controller/logstash/env.go new file mode 100644 index 0000000000..bece7fa127 --- /dev/null +++ b/pkg/controller/logstash/env.go @@ -0,0 +1,80 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package logstash + +import ( + "errors" + "path/filepath" + "strings" + + corev1 "k8s.io/api/core/v1" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/association" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/certificates" +) + +func buildEnv(params Params, esAssociations []commonv1.Association) ([]corev1.EnvVar, error) { + var envs []corev1.EnvVar //nolint:prealloc + for _, assoc := range esAssociations { + assocConf, err := assoc.AssociationConf() + if err != nil { + return nil, err + } + + credentials, err := association.ElasticsearchAuthSettings(params.Context, params.Client, assoc) + if err != nil { + return nil, err + } + + clusterName, err := getClusterName(assoc) + if err != nil { + return nil, err + } + + normalizedClusterName := normalize(clusterName) + + envs = append(envs, createEnvVar(normalizedClusterName+"_ES_HOSTS", assocConf.GetURL())) + envs = append(envs, createEnvVar(normalizedClusterName+"_ES_USER", credentials.Username)) + envs = append(envs, corev1.EnvVar{ + Name: normalizedClusterName + "_ES_PASSWORD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: assocConf.AuthSecretName, + }, + Key: assocConf.AuthSecretKey, + }, + }, + }) + + if assocConf.GetCACertProvided() { + caPath := filepath.Join(certificatesDir(assoc), certificates.CAFileName) + envs = append(envs, createEnvVar(normalizedClusterName+"_ES_SSL_CERTIFICATE_AUTHORITY", caPath)) + } + } + + return envs, nil +} + +func getClusterName(assoc commonv1.Association) (string, error) { + lses, ok := assoc.(*v1alpha1.LogstashESAssociation) + if !ok { + return "", errors.New("cannot cast association to LogstashESAssociation") + } + return lses.ClusterName, nil +} + +func normalize(nn string) string { + return strings.ToUpper(strings.ReplaceAll(nn, "-", "_")) +} + +func createEnvVar(key string, value string) corev1.EnvVar { + return corev1.EnvVar{ + Name: key, + Value: value, + } +} diff --git a/pkg/controller/logstash/env_test.go b/pkg/controller/logstash/env_test.go new file mode 100644 index 0000000000..b58e8c6217 --- /dev/null +++ b/pkg/controller/logstash/env_test.go @@ -0,0 +1,164 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package logstash + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" +) + +func Test_getEnvVars(t *testing.T) { + fakeLogstashUserSecret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "logstash-sample-default-elasticsearch-sample-logstash-user", Namespace: "default"}, + Data: map[string][]byte{"default-logstash-sample-default-elasticsearch-sample-logstash-user": []byte("1234567890")}, + } + + fakeExternalEsSecret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "external-cloud-es-ref", Namespace: "default"}, + Data: map[string][]byte{ + "url": []byte("https://some.gcp.cloud.es.io"), + "username": []byte("fake_user"), + "password": []byte("fake_password"), + }, + } + + params := Params{ + Logstash: logstashv1alpha1.Logstash{ + Spec: logstashv1alpha1.LogstashSpec{ + ElasticsearchRefs: []logstashv1alpha1.ElasticsearchCluster{ + { + ObjectSelector: commonv1.ObjectSelector{Name: "elasticsearch-sample", Namespace: "default"}, + ClusterName: "production", + }, + }, + }, + }, + Client: k8s.NewFakeClient(&fakeLogstashUserSecret, &fakeExternalEsSecret), + Context: context.Background(), + } + + for _, tt := range []struct { + name string + params Params + setAssocConfs func(assocs []commonv1.Association) + wantEnvs []corev1.EnvVar + }{ + { + name: "no es ref", + params: Params{ + Logstash: logstashv1alpha1.Logstash{ + Spec: logstashv1alpha1.LogstashSpec{}, + }, + Client: k8s.NewFakeClient(), + Context: context.Background(), + }, + setAssocConfs: func(assocs []commonv1.Association) {}, + wantEnvs: []corev1.EnvVar(nil), + }, + { + name: "es ref", + params: params, + setAssocConfs: func(assocs []commonv1.Association) { + assocs[0].SetAssociationConf(&commonv1.AssociationConf{ + AuthSecretName: "logstash-sample-default-elasticsearch-sample-logstash-user", + AuthSecretKey: "default-logstash-sample-default-elasticsearch-sample-logstash-user", + CACertProvided: true, + CASecretName: "logstash-sample-logstash-es-default-elasticsearch-sample-ca", + URL: "https://elasticsearch-sample-es-http.default.svc:9200", + Version: "8.7.0", + }) + assocs[0].SetNamespace("default") + }, + wantEnvs: []corev1.EnvVar{ + {Name: "PRODUCTION_ES_HOSTS", Value: "https://elasticsearch-sample-es-http.default.svc:9200"}, + {Name: "PRODUCTION_ES_USER", Value: "default-logstash-sample-default-elasticsearch-sample-logstash-user"}, + {Name: "PRODUCTION_ES_PASSWORD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "logstash-sample-default-elasticsearch-sample-logstash-user", + }, + Key: "default-logstash-sample-default-elasticsearch-sample-logstash-user", + }, + }, + }, + {Name: "PRODUCTION_ES_SSL_CERTIFICATE_AUTHORITY", Value: "/mnt/elastic-internal/elasticsearch-association/default/elasticsearch-sample/certs/ca.crt"}, + }, + }, + { + name: "es ref without tls", + params: params, + setAssocConfs: func(assocs []commonv1.Association) { + assocs[0].SetAssociationConf(&commonv1.AssociationConf{ + AuthSecretName: "logstash-sample-default-elasticsearch-sample-logstash-user", + AuthSecretKey: 
"default-logstash-sample-default-elasticsearch-sample-logstash-user", + CACertProvided: false, + URL: "http://elasticsearch-sample-es-http.default.svc:9200", + Version: "8.7.0", + }) + assocs[0].SetNamespace("default") + }, + wantEnvs: []corev1.EnvVar{ + {Name: "PRODUCTION_ES_HOSTS", Value: "http://elasticsearch-sample-es-http.default.svc:9200"}, + {Name: "PRODUCTION_ES_USER", Value: "default-logstash-sample-default-elasticsearch-sample-logstash-user"}, + {Name: "PRODUCTION_ES_PASSWORD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "logstash-sample-default-elasticsearch-sample-logstash-user", + }, + Key: "default-logstash-sample-default-elasticsearch-sample-logstash-user", + }, + }, + }, + }, + }, + { + name: "es ref with secretName", + params: params, + setAssocConfs: func(assocs []commonv1.Association) { + assocs[0].SetAssociationConf(&commonv1.AssociationConf{ + AuthSecretName: "external-cloud-es-ref", + AuthSecretKey: "password", + CACertProvided: false, + CASecretName: "", + URL: "https://some.gcp.cloud.es.io", + Version: "8.7.0", + }) + assocs[0].SetNamespace("default") + }, + wantEnvs: []corev1.EnvVar{ + {Name: "PRODUCTION_ES_HOSTS", Value: "https://some.gcp.cloud.es.io"}, + {Name: "PRODUCTION_ES_USER", Value: "fake_user"}, + {Name: "PRODUCTION_ES_PASSWORD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "external-cloud-es-ref", + }, + Key: "password", + }, + }, + }, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + assocs := tt.params.Logstash.GetAssociations() + tt.setAssocConfs(assocs) + envs, err := buildEnv(params, assocs) + require.NoError(t, err) + require.Equal(t, tt.wantEnvs, envs) + }) + } +} diff --git a/pkg/controller/logstash/initcontainer.go b/pkg/controller/logstash/initcontainer.go new file mode 100644 index 0000000000..dd6051d9b1 --- /dev/null +++ b/pkg/controller/logstash/initcontainer.go @@ -0,0 +1,75 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package logstash + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" +) + +const ( + InitConfigContainerName = "logstash-internal-init-config" + + // InitConfigScript is a small bash script to prepare the logstash configuration directory + InitConfigScript = `#!/usr/bin/env bash +set -eu + +init_config_initialized_flag=` + InitContainerConfigVolumeMountPath + `/elastic-internal-init-config.ok + +if [[ -f "${init_config_initialized_flag}" ]]; then + echo "Logstash configuration already initialized." + exit 0 +fi + +echo "Setup Logstash configuration" + +mount_path=` + InitContainerConfigVolumeMountPath + ` + +cp -f /usr/share/logstash/config/*.* "$mount_path" + +ln -sf ` + InternalConfigVolumeMountPath + `/logstash.yml $mount_path +ln -sf ` + InternalPipelineVolumeMountPath + `/pipelines.yml $mount_path + +touch "${init_config_initialized_flag}" +echo "Logstash configuration successfully prepared." +` +) + +// initConfigContainer returns an init container that executes a bash script to prepare the logstash config directory. 
+// This copies files from the `config` folder of the docker image, and creates symlinks for the `logstash.yml` and +// `pipelines.yml` files created by the operator into a shared config folder to be used by the main logstash container. +// This enables dynamic reloads for `pipelines.yml`. +func initConfigContainer(ls logstashv1alpha1.Logstash) corev1.Container { + privileged := false + + return corev1.Container{ + // Image will be inherited from pod template defaults + ImagePullPolicy: corev1.PullIfNotPresent, + Name: InitConfigContainerName, + SecurityContext: &corev1.SecurityContext{ + Privileged: &privileged, + }, + Command: []string{"/usr/bin/env", "bash", "-c", InitConfigScript}, + VolumeMounts: []corev1.VolumeMount{ + ConfigSharedVolume.InitContainerVolumeMount(), + ConfigVolume(ls).VolumeMount(), + PipelineVolume(ls).VolumeMount(), + }, + + Resources: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceMemory: resource.MustParse("50Mi"), + corev1.ResourceCPU: resource.MustParse("0.1"), + }, + Limits: map[corev1.ResourceName]resource.Quantity{ + // Memory limit should be at least 12582912 when running with CRI-O + corev1.ResourceMemory: resource.MustParse("50Mi"), + corev1.ResourceCPU: resource.MustParse("0.1"), + }, + }, + } +} diff --git a/pkg/controller/logstash/labels.go b/pkg/controller/logstash/labels.go new file mode 100644 index 0000000000..d1d9d58049 --- /dev/null +++ b/pkg/controller/logstash/labels.go @@ -0,0 +1,36 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package logstash + +import ( + "sigs.k8s.io/controller-runtime/pkg/client" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" +) + +const ( + // TypeLabelValue represents the Logstash type. + TypeLabelValue = "logstash" + + // NameLabelName used to represent a Logstash in k8s resources + NameLabelName = "logstash.k8s.elastic.co/name" + + // NamespaceLabelName used to represent a Logstash in k8s resources + NamespaceLabelName = "logstash.k8s.elastic.co/namespace" +) + +// NewLabels returns the set of common labels for an Elastic Logstash. +func NewLabels(logstash logstashv1alpha1.Logstash) map[string]string { + return map[string]string{ + commonv1.TypeLabelName: TypeLabelValue, + NameLabelName: logstash.Name, + } +} + +// NewLabelSelectorForLogstash returns a labels.Selector that matches the labels as constructed by NewLabels +func NewLabelSelectorForLogstash(ls logstashv1alpha1.Logstash) client.MatchingLabels { + return client.MatchingLabels(map[string]string{commonv1.TypeLabelName: TypeLabelValue, NameLabelName: ls.Name}) +} diff --git a/pkg/controller/logstash/logstash_controller.go b/pkg/controller/logstash/logstash_controller.go new file mode 100644 index 0000000000..0abf64419d --- /dev/null +++ b/pkg/controller/logstash/logstash_controller.go @@ -0,0 +1,210 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
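+//
+// logstash_controller.go wires the Logstash controller: it watches the Logstash resource itself,
+// its StatefulSet, Pods, Services and Secrets (owned, soft-owned and dynamically referenced),
+// and delegates the actual reconciliation logic to internalReconcile in driver.go.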
+ +package logstash + +import ( + "context" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/association" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/events" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/keystore" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/operator" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/reconciler" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/tracing" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/watches" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/logstash/pipelines" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" + logconf "github.com/elastic/cloud-on-k8s/v2/pkg/utils/log" +) + +const ( + controllerName = "logstash-controller" +) + +// Add creates a new Logstash Controller and adds it to the Manager with default RBAC. +// The Manager will set fields on the Controller and Start it when the Manager is Started. +func Add(mgr manager.Manager, params operator.Parameters) error { + r := newReconciler(mgr, params) + c, err := common.NewController(mgr, controllerName, r, params) + if err != nil { + return err + } + return addWatches(c, r) +} + +// newReconciler returns a new reconcile.Reconciler. +func newReconciler(mgr manager.Manager, params operator.Parameters) *ReconcileLogstash { + client := mgr.GetClient() + return &ReconcileLogstash{ + Client: client, + recorder: mgr.GetEventRecorderFor(controllerName), + dynamicWatches: watches.NewDynamicWatches(), + Parameters: params, + } +} + +// addWatches adds watches for all resources this controller cares about +func addWatches(c controller.Controller, r *ReconcileLogstash) error { + // Watch for changes to Logstash + if err := c.Watch(&source.Kind{Type: &logstashv1alpha1.Logstash{}}, &handler.EnqueueRequestForObject{}); err != nil { + return err + } + + // Watch StatefulSets + if err := c.Watch( + &source.Kind{Type: &appsv1.StatefulSet{}}, &handler.EnqueueRequestForOwner{ + IsController: true, + OwnerType: &logstashv1alpha1.Logstash{}, + }, + ); err != nil { + return err + } + + // Watch Pods, to ensure `status.version` is correctly reconciled on any change. + // Watching StatefulSets only may lead to missing some events. 
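+	// WatchPods maps Pods back to their Logstash through the logstash.k8s.elastic.co/name label
+	// (NameLabelName), so Pod-level changes re-queue the owning resource.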
+ if err := watches.WatchPods(c, NameLabelName); err != nil { + return err + } + + // Watch services + if err := c.Watch(&source.Kind{Type: &corev1.Service{}}, &handler.EnqueueRequestForOwner{ + IsController: true, + OwnerType: &logstashv1alpha1.Logstash{}, + }); err != nil { + return err + } + + // Watch owned and soft-owned secrets + if err := c.Watch(&source.Kind{Type: &corev1.Secret{}}, &handler.EnqueueRequestForOwner{ + IsController: true, + OwnerType: &logstashv1alpha1.Logstash{}, + }); err != nil { + return err + } + if err := watches.WatchSoftOwnedSecrets(c, logstashv1alpha1.Kind); err != nil { + return err + } + + // Watch dynamically referenced Secrets + return c.Watch(&source.Kind{Type: &corev1.Secret{}}, r.dynamicWatches.Secrets) +} + +var _ reconcile.Reconciler = &ReconcileLogstash{} + +// ReconcileLogstash reconciles a Logstash object +type ReconcileLogstash struct { + k8s.Client + recorder record.EventRecorder + dynamicWatches watches.DynamicWatches + operator.Parameters + // iteration is the number of times this controller has run its Reconcile method + iteration uint64 +} + +// Reconcile reads that state of the cluster for a Logstash object and makes changes based on the state read +// and what is in the Logstash.Spec +// Automatically generate RBAC rules to allow the Controller to read and write StatefulSets +// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=apps,resources=statefulsets/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=logstash.k8s.elastic.co,resources=logstashes,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=logstash.k8s.elastic.co,resources=logstashes/status,verbs=get;update;patch +func (r *ReconcileLogstash) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + ctx = common.NewReconciliationContext(ctx, &r.iteration, r.Tracer, controllerName, "logstash_name", request) + defer common.LogReconciliationRun(logconf.FromContext(ctx))() + defer tracing.EndContextTransaction(ctx) + + logstash := &logstashv1alpha1.Logstash{} + if err := r.Client.Get(ctx, request.NamespacedName, logstash); err != nil { + if apierrors.IsNotFound(err) { + return reconcile.Result{}, r.onDelete(ctx, request.NamespacedName) + } + return reconcile.Result{}, tracing.CaptureError(ctx, err) + } + + if common.IsUnmanaged(ctx, logstash) { + logconf.FromContext(ctx).Info("Object is currently not managed by this controller. 
Skipping reconciliation") + return reconcile.Result{}, nil + } + + if logstash.IsMarkedForDeletion() { + return reconcile.Result{}, nil + } + + results, status := r.doReconcile(ctx, *logstash) + + if err := updateStatus(ctx, *logstash, r.Client, status); err != nil { + if apierrors.IsConflict(err) { + return results.WithResult(reconcile.Result{Requeue: true}).Aggregate() + } + results = results.WithError(err) + } + + result, err := results.Aggregate() + k8s.MaybeEmitErrorEvent(r.recorder, err, logstash, events.EventReconciliationError, "Reconciliation error: %v", err) + + return result, err +} + +func (r *ReconcileLogstash) doReconcile(ctx context.Context, logstash logstashv1alpha1.Logstash) (*reconciler.Results, logstashv1alpha1.LogstashStatus) { + defer tracing.Span(&ctx)() + results := reconciler.NewResult(ctx) + status := newStatus(logstash) + + areAssocsConfigured, err := association.AreConfiguredIfSet(ctx, logstash.GetAssociations(), r.recorder) + if err != nil { + return results.WithError(err), status + } + if !areAssocsConfigured { + return results, status + } + + // Run basic validations as a fallback in case webhook is disabled. + if err := r.validate(ctx, logstash); err != nil { + results = results.WithError(err) + return results, status + } + + return internalReconcile(Params{ + Context: ctx, + Client: r.Client, + EventRecorder: r.recorder, + Watches: r.dynamicWatches, + Logstash: logstash, + Status: status, + OperatorParams: r.Parameters, + }) +} + +func (r *ReconcileLogstash) validate(ctx context.Context, logstash logstashv1alpha1.Logstash) error { + defer tracing.Span(&ctx)() + + // Run create validations only as update validations require old object which we don't have here. + if err := logstash.ValidateCreate(); err != nil { + logconf.FromContext(ctx).Error(err, "Validation failed") + k8s.MaybeEmitErrorEvent(r.recorder, err, &logstash, events.EventReasonValidation, err.Error()) + return tracing.CaptureError(ctx, err) + } + return nil +} + +func (r *ReconcileLogstash) onDelete(ctx context.Context, obj types.NamespacedName) error { + r.dynamicWatches.Secrets.RemoveHandlerForKey(keystore.SecureSettingsWatchName(obj)) + r.dynamicWatches.Secrets.RemoveHandlerForKey(common.ConfigRefWatchName(obj)) + r.dynamicWatches.Secrets.RemoveHandlerForKey(pipelines.RefWatchName(obj)) + return reconciler.GarbageCollectSoftOwnedSecrets(ctx, r.Client, obj, logstashv1alpha1.Kind) +} diff --git a/pkg/controller/logstash/logstash_controller_test.go b/pkg/controller/logstash/logstash_controller_test.go new file mode 100644 index 0000000000..d8647f42e8 --- /dev/null +++ b/pkg/controller/logstash/logstash_controller_test.go @@ -0,0 +1,465 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package logstash + +import ( + "context" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/comparison" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/hash" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/watches" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" +) + +func newReconcileLogstash(objs ...runtime.Object) *ReconcileLogstash { + r := &ReconcileLogstash{ + Client: k8s.NewFakeClient(objs...), + recorder: record.NewFakeRecorder(100), + dynamicWatches: watches.NewDynamicWatches(), + } + return r +} + +func TestReconcileLogstash_Reconcile(t *testing.T) { + defaultLabels := (&logstashv1alpha1.Logstash{ObjectMeta: metav1.ObjectMeta{Name: "testLogstash"}}).GetIdentityLabels() + tests := []struct { + name string + objs []runtime.Object + request reconcile.Request + want reconcile.Result + expected logstashv1alpha1.Logstash + expectedObjects expectedObjects + wantErr bool + }{ + { + name: "valid unmanaged Logstash does not increment observedGeneration", + objs: []runtime.Object{ + &logstashv1alpha1.Logstash{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testLogstash", + Namespace: "test", + Generation: 1, + Annotations: map[string]string{ + common.ManagedAnnotation: "false", + }, + }, + Spec: logstashv1alpha1.LogstashSpec{ + Version: "8.6.1", + }, + Status: logstashv1alpha1.LogstashStatus{ + ObservedGeneration: 1, + }, + }, + }, + request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "test", + Name: "testLogstash", + }, + }, + want: reconcile.Result{}, + expected: logstashv1alpha1.Logstash{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testLogstash", + Namespace: "test", + Generation: 1, + Annotations: map[string]string{ + common.ManagedAnnotation: "false", + }, + }, + Spec: logstashv1alpha1.LogstashSpec{ + Version: "8.6.1", + }, + Status: logstashv1alpha1.LogstashStatus{ + ObservedGeneration: 1, + }, + }, + expectedObjects: []expectedObject{}, + wantErr: false, + }, + { + name: "too long name fails validation, and updates observedGeneration", + objs: []runtime.Object{ + &logstashv1alpha1.Logstash{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testLogstashwithtoolongofanamereallylongname", + Namespace: "test", + Generation: 2, + }, + Status: logstashv1alpha1.LogstashStatus{ + ObservedGeneration: 1, + }, + }, + }, + request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "test", + Name: "testLogstashwithtoolongofanamereallylongname", + }, + }, + want: reconcile.Result{}, + expected: logstashv1alpha1.Logstash{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testLogstashwithtoolongofanamereallylongname", + Namespace: "test", + Generation: 2, + }, + Status: logstashv1alpha1.LogstashStatus{ + ObservedGeneration: 2, + }, + }, + expectedObjects: []expectedObject{}, + wantErr: true, + }, + { + name: "Logstash with ready StatefulSet and Pod updates status and creates secrets and service", + objs: []runtime.Object{ + &logstashv1alpha1.Logstash{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "testLogstash", + Namespace: "test", + Generation: 2, + }, + Spec: logstashv1alpha1.LogstashSpec{ + Version: "8.6.1", + Count: 1, + }, + Status: logstashv1alpha1.LogstashStatus{ + ObservedGeneration: 1, + }, + }, + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testLogstash-ls", + Namespace: "test", + Labels: addLabel(defaultLabels, hash.TemplateHashLabelName, "3145706383"), + }, + Status: appsv1.StatefulSetStatus{ + AvailableReplicas: 1, + Replicas: 1, + ReadyReplicas: 1, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testLogstash-ls", + Namespace: "test", + Generation: 2, + Labels: map[string]string{NameLabelName: "testLogstash", VersionLabelName: "8.6.1"}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + }, + }, + request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "test", + Name: "testLogstash", + }, + }, + want: reconcile.Result{}, + expectedObjects: []expectedObject{ + { + t: &corev1.Service{}, + name: types.NamespacedName{Namespace: "test", Name: "testLogstash-ls-api"}, + }, + { + t: &corev1.Secret{}, + name: types.NamespacedName{Namespace: "test", Name: "testLogstash-ls-config"}, + }, + { + t: &corev1.Secret{}, + name: types.NamespacedName{Namespace: "test", Name: "testLogstash-ls-pipeline"}, + }, + }, + + expected: logstashv1alpha1.Logstash{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testLogstash", + Namespace: "test", + Generation: 2, + }, + Spec: logstashv1alpha1.LogstashSpec{ + Version: "8.6.1", + Count: 1, + }, + Status: logstashv1alpha1.LogstashStatus{ + Version: "8.6.1", + ExpectedNodes: 1, + AvailableNodes: 1, + ObservedGeneration: 2, + }, + }, + wantErr: false, + }, + { + name: "Logstash with a custom service creates secrets and service", + objs: []runtime.Object{ + &logstashv1alpha1.Logstash{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testLogstash", + Namespace: "test", + Generation: 2, + }, + Spec: logstashv1alpha1.LogstashSpec{ + Version: "8.6.1", + Count: 1, + Services: []logstashv1alpha1.LogstashService{{ + Name: "test", + Service: commonv1.ServiceTemplate{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Protocol: "TCP", Port: 9500}, + }, + }, + }, + }}, + }, + Status: logstashv1alpha1.LogstashStatus{ + ObservedGeneration: 1, + }, + }, + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testLogstash-ls", + Namespace: "test", + Labels: addLabel(defaultLabels, hash.TemplateHashLabelName, "3145706383"), + }, + Status: appsv1.StatefulSetStatus{ + AvailableReplicas: 1, + Replicas: 1, + ReadyReplicas: 1, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testLogstash-ls", + Namespace: "test", + Generation: 2, + Labels: map[string]string{NameLabelName: "testLogstash", VersionLabelName: "8.6.1"}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + }, + }, + request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "test", + Name: "testLogstash", + }, + }, + want: reconcile.Result{}, + expectedObjects: []expectedObject{ + { + t: &corev1.Service{}, + name: types.NamespacedName{Namespace: "test", Name: "testLogstash-ls-api"}, + }, + { + t: &corev1.Service{}, + name: types.NamespacedName{Namespace: "test", Name: "testLogstash-ls-test"}, + }, + { + t: &corev1.Secret{}, + name: types.NamespacedName{Namespace: "test", Name: "testLogstash-ls-config"}, + }, + { + t: &corev1.Secret{}, + name: types.NamespacedName{Namespace: "test", Name: "testLogstash-ls-pipeline"}, + }, + }, + + 
expected: logstashv1alpha1.Logstash{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testLogstash", + Namespace: "test", + Generation: 2, + }, + Spec: logstashv1alpha1.LogstashSpec{ + Version: "8.6.1", + Count: 1, + Services: []logstashv1alpha1.LogstashService{{ + Name: "test", + Service: commonv1.ServiceTemplate{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Protocol: "TCP", Port: 9500}, + }, + }, + }, + }}, + }, + Status: logstashv1alpha1.LogstashStatus{ + Version: "8.6.1", + ExpectedNodes: 1, + AvailableNodes: 1, + ObservedGeneration: 2, + }, + }, + wantErr: false, + }, + { + name: "Logstash with a service with no port creates secrets and service", + objs: []runtime.Object{ + &logstashv1alpha1.Logstash{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testLogstash", + Namespace: "test", + Generation: 2, + }, + Spec: logstashv1alpha1.LogstashSpec{ + Version: "8.6.1", + Count: 1, + Services: []logstashv1alpha1.LogstashService{{ + Name: "api", + Service: commonv1.ServiceTemplate{ + Spec: corev1.ServiceSpec{ + Ports: nil, + }, + }, + }}, + }, + Status: logstashv1alpha1.LogstashStatus{ + ObservedGeneration: 1, + }, + }, + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testLogstash-ls", + Namespace: "test", + Labels: addLabel(defaultLabels, hash.TemplateHashLabelName, "3145706383"), + }, + Status: appsv1.StatefulSetStatus{ + AvailableReplicas: 1, + Replicas: 1, + ReadyReplicas: 1, + }, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testLogstash-ls", + Namespace: "test", + Generation: 2, + Labels: map[string]string{NameLabelName: "testLogstash", VersionLabelName: "8.6.1"}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + }, + }, + request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: "test", + Name: "testLogstash", + }, + }, + want: reconcile.Result{}, + expectedObjects: []expectedObject{ + { + t: &corev1.Service{}, + name: types.NamespacedName{Namespace: "test", Name: "testLogstash-ls-api"}, + }, + { + t: &corev1.Secret{}, + name: types.NamespacedName{Namespace: "test", Name: "testLogstash-ls-config"}, + }, + { + t: &corev1.Secret{}, + name: types.NamespacedName{Namespace: "test", Name: "testLogstash-ls-pipeline"}, + }, + }, + + expected: logstashv1alpha1.Logstash{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testLogstash", + Namespace: "test", + Generation: 2, + }, + Spec: logstashv1alpha1.LogstashSpec{ + Version: "8.6.1", + Count: 1, + Services: []logstashv1alpha1.LogstashService{{ + Name: "api", + Service: commonv1.ServiceTemplate{ + Spec: corev1.ServiceSpec{ + Ports: nil, + }, + }, + }}, + }, + Status: logstashv1alpha1.LogstashStatus{ + Version: "8.6.1", + ExpectedNodes: 1, + AvailableNodes: 1, + ObservedGeneration: 2, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := newReconcileLogstash(tt.objs...) 
+ got, err := r.Reconcile(context.Background(), tt.request) + if (err != nil) != tt.wantErr { + t.Errorf("ReconcileLogstash.Reconcile() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("ReconcileLogstash.Reconcile() = %v, want %v", got, tt.want) + } + + var Logstash logstashv1alpha1.Logstash + if err := r.Client.Get(context.Background(), tt.request.NamespacedName, &Logstash); err != nil { + t.Error(err) + return + } + tt.expectedObjects.assertExist(t, r.Client) + comparison.AssertEqual(t, &Logstash, &tt.expected) + }) + } +} + +func addLabel(labels map[string]string, key, value string) map[string]string { + newLabels := make(map[string]string, len(labels)) + for k, v := range labels { + newLabels[k] = v + } + newLabels[key] = value + return newLabels +} + +type expectedObject struct { + t client.Object + name types.NamespacedName +} + +type expectedObjects []expectedObject + +func (e expectedObjects) assertExist(t *testing.T, k8s client.Client) { + t.Helper() + for _, o := range e { + obj := o.t.DeepCopyObject().(client.Object) //nolint:forcetypeassert + assert.NoError(t, k8s.Get(context.Background(), o.name, obj), "Expected object not found: %s", o.name) + } +} \ No newline at end of file diff --git a/pkg/controller/logstash/network/ports.go b/pkg/controller/logstash/network/ports.go new file mode 100644 index 0000000000..197dae5249 --- /dev/null +++ b/pkg/controller/logstash/network/ports.go @@ -0,0 +1,10 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package network + +const ( + // HTTPPort is the (default) API port used by Logstash + HTTPPort = 9600 +) diff --git a/pkg/controller/logstash/pipeline.go b/pkg/controller/logstash/pipeline.go new file mode 100644 index 0000000000..8aee3add3d --- /dev/null +++ b/pkg/controller/logstash/pipeline.go @@ -0,0 +1,88 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
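+//
+// pipeline.go reconciles pipelines.yml into the <name>-ls-pipeline Secret. The pipeline content is
+// deliberately kept out of the config hash; on update the Pods are only marked as updated
+// (annotation.MarkPodsAsUpdated) and Logstash's automatic pipeline reload, enabled by default via
+// config.reload.automatic in config.go, picks up the change without a Pod restart.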
+ +package logstash + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/controller-runtime/pkg/client" + + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/annotation" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/labels" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/reconciler" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/tracing" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/logstash/pipelines" +) + +func reconcilePipeline(params Params) error { + defer tracing.Span(¶ms.Context)() + + cfgBytes, err := buildPipeline(params) + if err != nil { + return err + } + + expected := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: params.Logstash.Namespace, + Name: logstashv1alpha1.PipelineSecretName(params.Logstash.Name), + Labels: labels.AddCredentialsLabel(NewLabels(params.Logstash)), + }, + Data: map[string][]byte{ + PipelineFileName: cfgBytes, + }, + } + + if _, err := reconciler.ReconcileSecret(params.Context, params.Client, expected, ¶ms.Logstash, + reconciler.WithPostUpdate(func() { + annotation.MarkPodsAsUpdated(params.Context, params.Client, + client.InNamespace(params.Logstash.Namespace), + NewLabelSelectorForLogstash(params.Logstash), + ) + }), + ); err != nil { + return err + } + return nil +} + +func buildPipeline(params Params) ([]byte, error) { + userProvidedCfg, err := getUserPipeline(params) + if err != nil { + return nil, err + } + + if userProvidedCfg != nil { + return userProvidedCfg.Render() + } + + cfg := defaultPipeline + return cfg.Render() +} + +// getUserPipeline extracts the pipeline either from the spec `pipeline` field or from the Secret referenced by spec +// `pipelineRef` field. +func getUserPipeline(params Params) (*pipelines.Config, error) { + if params.Logstash.Spec.Pipelines != nil { + pipes := make([]map[string]interface{}, 0, len(params.Logstash.Spec.Pipelines)) + for _, p := range params.Logstash.Spec.Pipelines { + pipes = append(pipes, p.Data) + } + + return pipelines.FromSpec(pipes) + } + return pipelines.ParsePipelinesRef(params, ¶ms.Logstash, params.Logstash.Spec.PipelinesRef, PipelineFileName) +} + +var ( + defaultPipeline = pipelines.MustFromSpec([]map[string]string{ + { + "pipeline.id": "main", + "path.config": "/usr/share/logstash/pipeline", + }, + }) +) diff --git a/pkg/controller/logstash/pipeline_test.go b/pkg/controller/logstash/pipeline_test.go new file mode 100644 index 0000000000..59d28a5bf2 --- /dev/null +++ b/pkg/controller/logstash/pipeline_test.go @@ -0,0 +1,124 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package logstash + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/watches" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/logstash/pipelines" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" +) + +func Test_buildPipeline(t *testing.T) { + for _, tt := range []struct { + name string + pipelines []commonv1.Config + pipelinesRef *commonv1.ConfigSource + client k8s.Client + want *pipelines.Config + wantErr bool + }{ + { + name: "no user pipeline", + want: defaultPipeline, + }, + { + name: "pipeline populated", + pipelines: []commonv1.Config{ + {Data: map[string]interface{}{"pipeline.id": "main"}}, + }, + want: pipelines.MustParse([]byte(`- "pipeline.id": "main"`)), + }, + { + name: "pipelinesref populated - no secret", + pipelinesRef: &commonv1.ConfigSource{ + SecretRef: commonv1.SecretRef{ + SecretName: "my-secret-pipeline", + }, + }, + client: k8s.NewFakeClient(), + want: pipelines.EmptyConfig(), + wantErr: true, + }, + { + name: "pipelinesref populated - no secret key", + pipelinesRef: &commonv1.ConfigSource{ + SecretRef: commonv1.SecretRef{ + SecretName: "my-secret-pipeline", + }, + }, + client: k8s.NewFakeClient(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-secret-pipeline", + }, + }), + want: pipelines.EmptyConfig(), + wantErr: true, + }, + { + name: "pipelinesref populated - malformed config", + pipelinesRef: &commonv1.ConfigSource{ + SecretRef: commonv1.SecretRef{ + SecretName: "my-secret-pipeline-2", + }, + }, + client: k8s.NewFakeClient(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-secret-pipeline-2", + }, + Data: map[string][]byte{"pipelines.yml": []byte("something:bad:value")}, + }), + want: pipelines.EmptyConfig(), + wantErr: true, + }, + { + name: "pipelinesref populated", + pipelinesRef: &commonv1.ConfigSource{ + SecretRef: commonv1.SecretRef{ + SecretName: "my-secret-pipeline-2", + }, + }, + client: k8s.NewFakeClient(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-secret-pipeline-2", + }, + Data: map[string][]byte{"pipelines.yml": []byte(`- "pipeline.id": "main"`)}, + }), + want: pipelines.MustParse([]byte(`- "pipeline.id": "main"`)), + }, + } { + t.Run(tt.name, func(t *testing.T) { + params := Params{ + Context: context.Background(), + Client: tt.client, + EventRecorder: &record.FakeRecorder{}, + Watches: watches.NewDynamicWatches(), + Logstash: logstashv1alpha1.Logstash{ + Spec: logstashv1alpha1.LogstashSpec{ + Pipelines: tt.pipelines, + PipelinesRef: tt.pipelinesRef, + }, + }, + } + + gotYaml, gotErr := buildPipeline(params) + diff, err := tt.want.Diff(pipelines.MustParse(gotYaml)) + if diff { + t.Errorf("buildPipeline() got unexpected differences: %v", err) + } + + require.Equal(t, tt.wantErr, gotErr != nil) + }) + } +} diff --git a/pkg/controller/logstash/pipelines/config.go b/pkg/controller/logstash/pipelines/config.go new file mode 100644 index 0000000000..f92de18aa5 --- /dev/null +++ b/pkg/controller/logstash/pipelines/config.go @@ -0,0 +1,135 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package pipelines + +import ( + "fmt" + "reflect" + + "github.com/elastic/go-ucfg" + uyaml "github.com/elastic/go-ucfg/yaml" + "gopkg.in/yaml.v3" +) + +// Config contains configuration for Logstash pipeline ("pipelines.yml"), +// `.` in between the key, pipeline.id, is treated as string +// pipelines.yml is expected an array of pipeline definition. +type Config ucfg.Config + +// Options are config options for the YAML file. +var Options = []ucfg.Option{ucfg.AppendValues} + +// EmptyConfig creates a new empty config. +func EmptyConfig() *Config { + return fromConfig(ucfg.New()) +} + +// FromSpec creates a new pipeline from spec. +func FromSpec(cfg interface{}) (*Config, error) { + config, err := ucfg.NewFrom(cfg, Options...) + if err != nil { + return nil, err + } + return fromConfig(config), nil +} + +// MustFromSpec creates a new pipeline and panics on errors. +// Use for testing only. +func MustFromSpec(cfg interface{}) *Config { + config, err := FromSpec(cfg) + if err != nil { + panic(err) + } + return config +} + +// Parse parses the given pipeline content into a PipelinesConfig. +// Expects content to be in YAML format. +func Parse(yml []byte) (*Config, error) { + config, err := uyaml.NewConfig(yml, Options...) + if err != nil { + return nil, err + } + return fromConfig(config), nil +} + +// MustParse parses the given pipeline content into a Pipelines. +// Expects content to be in YAML format. Panics on error. +// Use for testing only. +func MustParse(yml []byte) *Config { + config, err := uyaml.NewConfig(yml, Options...) + if err != nil { + panic(err) + } + return fromConfig(config) +} + +// Render returns the content of the configuration file, +// with fields sorted alphabetically. +func (c *Config) Render() ([]byte, error) { + if c == nil { + return []byte{}, nil + } + var out []interface{} + if err := c.asUCfg().Unpack(&out); err != nil { + return []byte{}, err + } + return yaml.Marshal(out) +} + +func (c *Config) asUCfg() *ucfg.Config { + return (*ucfg.Config)(c) +} + +func fromConfig(in *ucfg.Config) *Config { + return (*Config)(in) +} + +// Diff returns true if the key/value or the sequence of two PipelinesConfig are different. +// Use for testing only. +func (c *Config) Diff(c2 *Config) (bool, error) { + if c == c2 { + return false, nil + } + if c == nil && c2 != nil { + return true, fmt.Errorf("empty lhs config %s", c2.asUCfg().FlattenedKeys(Options...)) + } + if c != nil && c2 == nil { + return true, fmt.Errorf("empty rhs config %s", c.asUCfg().FlattenedKeys(Options...)) + } + + var s []map[string]interface{} + var s2 []map[string]interface{} + err := c.asUCfg().Unpack(&s, Options...) + if err != nil { + return true, err + } + err = c2.asUCfg().Unpack(&s2, Options...) + if err != nil { + return true, err + } + + return diffSlice(s, s2) +} + +// diffSlice returns true if the key/value or the sequence of two PipelinesConfig are different. +func diffSlice(s1, s2 []map[string]interface{}) (bool, error) { + if len(s1) != len(s2) { + return true, fmt.Errorf("array size doesn't match %d, %d", len(s1), len(s2)) + } + var diff []string + for i, m := range s1 { + m2 := s2[i] + if eq := reflect.DeepEqual(m, m2); !eq { + diff = append(diff, fmt.Sprintf("%s vs %s, ", m, m2)) + } + } + + if len(diff) > 0 { + return true, fmt.Errorf("there are %d differences. 
%s", len(diff), diff) + } + + return false, nil +} diff --git a/pkg/controller/logstash/pipelines/config_test.go b/pkg/controller/logstash/pipelines/config_test.go new file mode 100644 index 0000000000..43c16835bb --- /dev/null +++ b/pkg/controller/logstash/pipelines/config_test.go @@ -0,0 +1,287 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package pipelines + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestPipelinesConfig_Render(t *testing.T) { + config := MustFromSpec( + []map[string]interface{}{ + { + "pipeline.id": "demo", + "config.string": "input { exec { command => \"uptime\" interval => 5 } } output { stdout{} }", + }, + { + "pipeline.id": "standard", + "pipeline.workers": 1, + "queue.type": "persisted", + "queue.drain": true, + "dead_letter_queue.max_bytes": "1024mb", + "path.config": "/tmp/logstash/*.config", + }, + }, + ) + output, err := config.Render() + require.NoError(t, err) + expected := []byte(`- config.string: input { exec { command => "uptime" interval => 5 } } output { stdout{} } + pipeline.id: demo +- dead_letter_queue.max_bytes: 1024mb + path.config: /tmp/logstash/*.config + pipeline.id: standard + pipeline.workers: 1 + queue.drain: true + queue.type: persisted +`) + require.Equal(t, string(expected), string(output)) +} + +func TestParsePipelinesConfig(t *testing.T) { + tests := []struct { + name string + input string + want *Config + wantErr bool + }{ + { + name: "no input", + input: "", + want: EmptyConfig(), + wantErr: false, + }, + { + name: "simple input", + input: "- pipeline.id: demo\n config.string: input { exec { command => \"${ENV}\" interval => 5 } }", + want: MustFromSpec( + []map[string]interface{}{ + { + "pipeline.id": "demo", + "config.string": "input { exec { command => \"${ENV}\" interval => 5 } }", + }, + }, + ), + wantErr: false, + }, + { + name: "number input", + input: "- pipeline.id: main\n pipeline.workers: 4", + want: MustFromSpec( + []map[string]interface{}{ + { + "pipeline.id": "main", + "pipeline.workers": 4, + }, + }, + ), + wantErr: false, + }, + { + name: "boolean input", + input: "- pipeline.id: main\n queue.drain: false", + want: MustFromSpec( + []map[string]interface{}{ + { + "pipeline.id": "main", + "queue.drain": false, + }, + }, + ), + wantErr: false, + }, + { + name: "trim whitespaces between key and value", + input: "- pipeline.id : demo \n path.config : /tmp/logstash/*.config ", + want: MustFromSpec( + []map[string]interface{}{ + { + "pipeline.id": "demo", + "path.config": "/tmp/logstash/*.config", + }, + }, + ), + wantErr: false, + }, + { + name: "tabs are invalid in YAML", + input: "\ta: b \n c:d ", + wantErr: true, + }, + { + name: "trim newlines", + input: "- pipeline.id: demo \n\n- pipeline.id: demo2 \n", + want: MustFromSpec( + []map[string]interface{}{ + {"pipeline.id": "demo"}, + {"pipeline.id": "demo2"}, + }, + ), + wantErr: false, + }, + { + name: "ignore comments", + input: "- pipeline.id: demo \n#this is a comment\n pipeline.workers: \"1\"\n", + want: MustFromSpec( + []map[string]interface{}{ + { + "pipeline.id": "demo", + "pipeline.workers": "1", + }, + }, + ), + wantErr: false, + }, + { + name: "support quotes", + input: `- "pipeline.id": "quote"`, + want: MustFromSpec( + []map[string]interface{}{ + {"pipeline.id": "quote"}, + }, + ), + wantErr: false, + }, + { + name: "support 
special characters", + input: `- config.string: "${node.ip}%.:=+è! /"`, + want: MustFromSpec( + []map[string]interface{}{ + {"config.string": `${node.ip}%.:=+è! /`}, + }, + ), + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := Parse([]byte(tt.input)) + if (err != nil) != tt.wantErr { + t.Errorf("Parse() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if got == tt.want { + return + } + + if diff, _ := got.Diff(tt.want); diff { + gotRendered, err := got.Render() + require.NoError(t, err) + wantRendered, err := tt.want.Render() + require.NoError(t, err) + t.Errorf("Parse(), want: %s, got: %s", wantRendered, gotRendered) + } + }) + } +} + +func TestPipelinesConfig_Diff(t *testing.T) { + tests := []struct { + name string + c *Config + c2 *Config + wantDiff bool + }{ + { + name: "nil diff", + c: nil, + c2: nil, + wantDiff: false, + }, + { + name: "lhs nil", + c: nil, + c2: MustFromSpec( + []map[string]interface{}{ + {"a": "a"}, + {"b": "b"}, + }, + ), + wantDiff: true, + }, + { + name: "rhs nil", + c: MustFromSpec( + []map[string]interface{}{ + {"a": "a"}, + }, + ), + c2: nil, + wantDiff: true, + }, + { + name: "same multi key value", + c: MustFromSpec( + []map[string]interface{}{ + {"a": "a", "b": "b", "c": 1, "d": true}, + }, + ), + c2: MustFromSpec( + []map[string]interface{}{ + {"c": 1, "b": "b", "a": "a", "d": true}, + }, + ), + wantDiff: false, + }, + { + name: "different value", + c: MustFromSpec( + []map[string]interface{}{ + {"a": "a"}, + }, + ), + c2: MustFromSpec( + []map[string]interface{}{ + {"a": "b"}, + }, + ), + wantDiff: true, + }, + { + name: "array size different", + c: MustFromSpec( + []map[string]interface{}{ + {"a": "a"}, + }, + ), + c2: MustFromSpec( + []map[string]interface{}{ + {"a": "a"}, + {"a": "a"}, + }, + ), + wantDiff: true, + }, + { + name: "respects list order", + c: MustFromSpec( + []map[string]interface{}{ + {"a": "a"}, + {"b": "b"}, + }, + ), + c2: MustFromSpec( + []map[string]interface{}{ + {"b": "b"}, + {"a": "a"}, + }, + ), + wantDiff: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + diff, err := tt.c.Diff(tt.c2) + if (err != nil) != tt.wantDiff { + t.Errorf("Diff() got unexpected differences. wantDiff: %t, err: %v", tt.wantDiff, err) + return + } + + require.Equal(t, tt.wantDiff, diff) + }) + } +} diff --git a/pkg/controller/logstash/pipelines/ref.go b/pkg/controller/logstash/pipelines/ref.go new file mode 100644 index 0000000000..4cf753c19b --- /dev/null +++ b/pkg/controller/logstash/pipelines/ref.go @@ -0,0 +1,37 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package pipelines + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/driver" +) + +// RefWatchName returns the name of the watch registered on the secret referenced in `pipelinesRef`. 
+func RefWatchName(resource types.NamespacedName) string { + return fmt.Sprintf("%s-%s-pipelinesref", resource.Namespace, resource.Name) +} + +// ParsePipelinesRef retrieves the content of a secret referenced in `pipelinesRef`, sets up dynamic watches for that secret, +// and parses the secret content into a PipelinesConfig. +func ParsePipelinesRef( + driver driver.Interface, + resource runtime.Object, + pipelinesRef *commonv1.ConfigSource, + secretKey string, // retrieve config data from that entry in the secret +) (*Config, error) { + parsed, err := common.ParseConfigRefToConfig(driver, resource, pipelinesRef, secretKey, RefWatchName, Options) + if err != nil { + return nil, err + } + + return (*Config)(parsed), nil +} diff --git a/pkg/controller/logstash/pipelines/ref_test.go b/pkg/controller/logstash/pipelines/ref_test.go new file mode 100644 index 0000000000..f71cdd1dae --- /dev/null +++ b/pkg/controller/logstash/pipelines/ref_test.go @@ -0,0 +1,191 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package pipelines + +import ( + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/driver" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/watches" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" +) + +type fakeDriver struct { + client k8s.Client + watches watches.DynamicWatches + recorder record.EventRecorder +} + +func (f fakeDriver) K8sClient() k8s.Client { + return f.client +} + +func (f fakeDriver) DynamicWatches() watches.DynamicWatches { + return f.watches +} + +func (f fakeDriver) Recorder() record.EventRecorder { + return f.recorder +} + +var _ driver.Interface = fakeDriver{} + +func TestParsePipelinesRef(t *testing.T) { + // any resource Kind would work here (eg. Beat, EnterpriseSearch, etc.) 
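+	// Hypothetical caller sketch (assumed secret key, not asserted here): a reconciler would
+	// typically call ParsePipelinesRef(d, &ls, ls.Spec.PipelinesRef, "pipelines.yml"); the cases
+	// below exercise the same code path with an explicit "configFile.yml" key.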
+ resNsn := types.NamespacedName{Namespace: "ns", Name: "resource"} + res := corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: resNsn.Namespace, Name: resNsn.Name}} + watchName := RefWatchName(resNsn) + + tests := []struct { + name string + pipelinesRef *commonv1.ConfigSource + secretKey string + runtimeObjs []runtime.Object + want *Config + wantErr bool + existingWatches []string + wantWatches []string + wantEvent string + }{ + { + name: "happy path", + pipelinesRef: &commonv1.ConfigSource{SecretRef: commonv1.SecretRef{SecretName: "my-secret"}}, + secretKey: "configFile.yml", + runtimeObjs: []runtime.Object{ + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "my-secret"}, + Data: map[string][]byte{ + "configFile.yml": []byte(`- "pipeline.id": "main"`), + }}, + }, + want: MustParse([]byte(`- "pipeline.id": "main"`)), + wantWatches: []string{watchName}, + }, + { + name: "happy path, secret already watched", + pipelinesRef: &commonv1.ConfigSource{SecretRef: commonv1.SecretRef{SecretName: "my-secret"}}, + secretKey: "configFile.yml", + runtimeObjs: []runtime.Object{ + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "my-secret"}, + Data: map[string][]byte{ + "configFile.yml": []byte(`- "pipeline.id": "main"`), + }}, + }, + want: MustParse([]byte(`- "pipeline.id": "main"`)), + existingWatches: []string{watchName}, + wantWatches: []string{watchName}, + }, + { + name: "no pipelinesRef specified", + pipelinesRef: nil, + secretKey: "configFile.yml", + runtimeObjs: []runtime.Object{ + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "my-secret"}, + Data: map[string][]byte{ + "configFile.yml": []byte(`- "pipeline.id": "main"`), + }}, + }, + want: nil, + wantWatches: []string{}, + }, + { + name: "no pipelinesRef specified: clear existing watches", + pipelinesRef: nil, + secretKey: "configFile.yml", + runtimeObjs: []runtime.Object{ + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "my-secret"}, + Data: map[string][]byte{ + "configFile.yml": []byte(`- "pipeline.id": "main"`), + }}, + }, + want: nil, + existingWatches: []string{watchName}, + wantWatches: []string{}, + }, + { + name: "secret not found: error out but watch the future secret", + pipelinesRef: &commonv1.ConfigSource{SecretRef: commonv1.SecretRef{SecretName: "my-secret"}}, + secretKey: "configFile.yml", + runtimeObjs: []runtime.Object{}, + want: nil, + wantErr: true, + wantWatches: []string{watchName}, + }, + { + name: "missing key in the referenced secret: error out, watch the secret and emit an event", + pipelinesRef: &commonv1.ConfigSource{SecretRef: commonv1.SecretRef{SecretName: "my-secret"}}, + secretKey: "configFile.yml", + runtimeObjs: []runtime.Object{ + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "my-secret"}, + Data: map[string][]byte{ + "unexpected-key": []byte(`- "pipeline.id": "main"`), + }}, + }, + wantErr: true, + wantWatches: []string{watchName}, + wantEvent: "Warning Unexpected unable to parse configRef secret ns/my-secret: missing key configFile.yml", + }, + { + name: "invalid config the referenced secret: error out, watch the secret and emit an event", + pipelinesRef: &commonv1.ConfigSource{SecretRef: commonv1.SecretRef{SecretName: "my-secret"}}, + secretKey: "configFile.yml", + runtimeObjs: []runtime.Object{ + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "my-secret"}, + Data: map[string][]byte{ + "configFile.yml": []byte("this.is invalid config"), + }}, + }, + wantErr: 
true, + wantWatches: []string{watchName}, + wantEvent: "Warning Unexpected unable to parse configFile.yml in configRef secret ns/my-secret", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeRecorder := record.NewFakeRecorder(10) + w := watches.NewDynamicWatches() + for _, existingWatch := range tt.existingWatches { + require.NoError(t, w.Secrets.AddHandler(watches.NamedWatch{Name: existingWatch})) + } + d := fakeDriver{ + client: k8s.NewFakeClient(tt.runtimeObjs...), + watches: w, + recorder: fakeRecorder, + } + got, err := ParsePipelinesRef(d, &res, tt.pipelinesRef, tt.secretKey) + if (err != nil) != tt.wantErr { + t.Errorf("ParsePipelinesRef() error = %v, wantErr %v", err, tt.wantErr) + return + } + require.Equal(t, tt.want, got) + require.Equal(t, tt.wantWatches, d.watches.Secrets.Registrations()) + + if tt.wantEvent != "" { + require.Equal(t, tt.wantEvent, <-fakeRecorder.Events) + } else { + // no event expected + select { + case e := <-fakeRecorder.Events: + require.Fail(t, "no event expected but got one", "event", e) + default: + // ok + } + } + }) + } +} diff --git a/pkg/controller/logstash/pod.go b/pkg/controller/logstash/pod.go new file mode 100644 index 0000000000..5fe4b148ec --- /dev/null +++ b/pkg/controller/logstash/pod.go @@ -0,0 +1,167 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package logstash + +import ( + "fmt" + "hash" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/intstr" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + commonassociation "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/association" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/container" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/defaults" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/tracing" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/logstash/network" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/logstash/stackmon" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/maps" +) + +const ( + ConfigVolumeName = "config" + ConfigMountPath = "/usr/share/logstash/config" + + LogstashConfigVolumeName = "logstash" + LogstashConfigFileName = "logstash.yml" + + PipelineVolumeName = "pipeline" + PipelineFileName = "pipelines.yml" + + // ConfigHashAnnotationName is an annotation used to store the Logstash config hash. + ConfigHashAnnotationName = "logstash.k8s.elastic.co/config-hash" + + // VersionLabelName is a label used to track the version of a Logstash Pod. 
+	VersionLabelName = "logstash.k8s.elastic.co/version"
+)
+
+var (
+	DefaultResources = corev1.ResourceRequirements{
+		Limits: map[corev1.ResourceName]resource.Quantity{
+			corev1.ResourceMemory: resource.MustParse("2Gi"),
+			corev1.ResourceCPU:    resource.MustParse("2000m"),
+		},
+		Requests: map[corev1.ResourceName]resource.Quantity{
+			corev1.ResourceMemory: resource.MustParse("2Gi"),
+			corev1.ResourceCPU:    resource.MustParse("1000m"),
+		},
+	}
+)
+
+func buildPodTemplate(params Params, configHash hash.Hash32) (corev1.PodTemplateSpec, error) {
+	defer tracing.Span(&params.Context)()
+	spec := &params.Logstash.Spec
+	builder := defaults.NewPodTemplateBuilder(params.GetPodTemplate(), logstashv1alpha1.LogstashContainerName)
+
+	vols, err := buildVolumes(params)
+	if err != nil {
+		return corev1.PodTemplateSpec{}, err
+	}
+
+	esAssociations := getEsAssociations(params)
+	if err := writeEsAssocToConfigHash(params, esAssociations, configHash); err != nil {
+		return corev1.PodTemplateSpec{}, err
+	}
+
+	envs, err := buildEnv(params, esAssociations)
+	if err != nil {
+		return corev1.PodTemplateSpec{}, err
+	}
+
+	labels := maps.Merge(params.Logstash.GetIdentityLabels(), map[string]string{
+		VersionLabelName: spec.Version})
+
+	annotations := map[string]string{
+		ConfigHashAnnotationName: fmt.Sprint(configHash.Sum32()),
+	}
+
+	ports := getDefaultContainerPorts()
+
+	builder = builder.
+		WithResources(DefaultResources).
+		WithLabels(labels).
+		WithAnnotations(annotations).
+		WithDockerImage(spec.Image, container.ImageRepository(container.LogstashImage, spec.Version)).
+		WithAutomountServiceAccountToken().
+		WithPorts(ports).
+		WithReadinessProbe(readinessProbe(params.Logstash)).
+		WithVolumeLikes(vols...).
+		WithInitContainers(initConfigContainer(params.Logstash)).
+		WithEnv(envs...).
+		WithInitContainerDefaults()
+
+	builder, err = stackmon.WithMonitoring(params.Context, params.Client, builder, params.Logstash)
+	if err != nil {
+		return corev1.PodTemplateSpec{}, err
+	}
+
+	// TODO integrate with api.ssl.enabled
+	// if params.Logstash.Spec.HTTP.TLS.Enabled() {
+	//	httpVol := certificates.HTTPCertSecretVolume(logstashv1alpha1.Namer, params.Logstash.Name)
+	//	builder.
+	//		WithVolumes(httpVol.Volume()).
+ // WithVolumeMounts(httpVol.VolumeMount()) + // } + + return builder.PodTemplate, nil +} + +func getDefaultContainerPorts() []corev1.ContainerPort { + return []corev1.ContainerPort{ + {Name: "http", ContainerPort: int32(network.HTTPPort), Protocol: corev1.ProtocolTCP}, + } +} + +// readinessProbe is the readiness probe for the Logstash container +func readinessProbe(logstash logstashv1alpha1.Logstash) corev1.Probe { + var scheme = corev1.URISchemeHTTP + var port = network.HTTPPort + for _, service := range logstash.Spec.Services { + if service.Name == LogstashAPIServiceName && len(service.Service.Spec.Ports) > 0 { + port = int(service.Service.Spec.Ports[0].Port) + } + } + probe := corev1.Probe{ + FailureThreshold: 3, + InitialDelaySeconds: 30, + PeriodSeconds: 10, + SuccessThreshold: 1, + TimeoutSeconds: 5, + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Port: intstr.FromInt(port), + Path: "/", + Scheme: scheme, + }, + }, + } + return probe +} + +func getEsAssociations(params Params) []commonv1.Association { + var esAssociations []commonv1.Association + + for _, assoc := range params.Logstash.GetAssociations() { + if assoc.AssociationType() == commonv1.ElasticsearchAssociationType { + esAssociations = append(esAssociations, assoc) + } + } + return esAssociations +} + +func writeEsAssocToConfigHash(params Params, esAssociations []commonv1.Association, configHash hash.Hash) error { + if esAssociations == nil { + return nil + } + + return commonassociation.WriteAssocsToConfigHash( + params.Client, + esAssociations, + configHash, + ) +} diff --git a/pkg/controller/logstash/pod_test.go b/pkg/controller/logstash/pod_test.go new file mode 100644 index 0000000000..7cfa9171a9 --- /dev/null +++ b/pkg/controller/logstash/pod_test.go @@ -0,0 +1,286 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
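+// For illustration only: the readiness probe defined in pod.go targets the first port of a
+// user-provided "api" service when one is declared, e.g. a spec containing
+//
+//	services:
+//	- name: api
+//	  service:
+//	    spec:
+//	      ports:
+//	      - port: 9200
+//
+// makes the probe hit port 9200 instead of the default 9600; the tests below cover both cases.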
+ +package logstash + +import ( + "context" + "hash/fnv" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/container" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/pod" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" +) + +func TestNewPodTemplateSpec(t *testing.T) { + tests := []struct { + name string + logstash logstashv1alpha1.Logstash + assertions func(pod corev1.PodTemplateSpec) + }{ + { + name: "defaults", + logstash: logstashv1alpha1.Logstash{ + Spec: logstashv1alpha1.LogstashSpec{ + Version: "8.6.1", + }, + }, + assertions: func(pod corev1.PodTemplateSpec) { + assert.Equal(t, false, *pod.Spec.AutomountServiceAccountToken) + assert.Len(t, pod.Spec.Containers, 1) + assert.Len(t, pod.Spec.InitContainers, 1) + assert.Len(t, pod.Spec.Volumes, 3) + assert.NotEmpty(t, pod.Annotations[ConfigHashAnnotationName]) + logstashContainer := GetLogstashContainer(pod.Spec) + require.NotNil(t, logstashContainer) + assert.Equal(t, 3, len(logstashContainer.VolumeMounts)) + assert.Equal(t, container.ImageRepository(container.LogstashImage, "8.6.1"), logstashContainer.Image) + assert.NotNil(t, logstashContainer.ReadinessProbe) + assert.NotEmpty(t, logstashContainer.Ports) + }, + }, + { + name: "with custom image", + logstash: logstashv1alpha1.Logstash{Spec: logstashv1alpha1.LogstashSpec{ + Image: "my-custom-image:1.0.0", + Version: "8.6.1", + }}, + assertions: func(pod corev1.PodTemplateSpec) { + assert.Equal(t, "my-custom-image:1.0.0", GetLogstashContainer(pod.Spec).Image) + }, + }, + { + name: "with default resources", + logstash: logstashv1alpha1.Logstash{Spec: logstashv1alpha1.LogstashSpec{ + Version: "8.6.1", + }}, + assertions: func(pod corev1.PodTemplateSpec) { + assert.Equal(t, DefaultResources, GetLogstashContainer(pod.Spec).Resources) + }, + }, + { + name: "with user-provided resources", + logstash: logstashv1alpha1.Logstash{Spec: logstashv1alpha1.LogstashSpec{ + Version: "8.6.1", + PodTemplate: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "logstash", + Resources: corev1.ResourceRequirements{ + Limits: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceMemory: resource.MustParse("3Gi"), + }, + }, + }, + }, + }, + }, + }}, + assertions: func(pod corev1.PodTemplateSpec) { + assert.Equal(t, corev1.ResourceRequirements{ + Limits: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceMemory: resource.MustParse("3Gi"), + }, + }, GetLogstashContainer(pod.Spec).Resources) + }, + }, + { + name: "with user-provided init containers", + logstash: logstashv1alpha1.Logstash{Spec: logstashv1alpha1.LogstashSpec{ + PodTemplate: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: "user-init-container", + }, + }, + }, + }, + }}, + assertions: func(pod corev1.PodTemplateSpec) { + assert.Len(t, pod.Spec.InitContainers, 2) + assert.Equal(t, pod.Spec.Containers[0].Image, pod.Spec.InitContainers[0].Image) + }, + }, + { + name: "with user-provided labels", + logstash: logstashv1alpha1.Logstash{ + ObjectMeta: metav1.ObjectMeta{ + Name: "logstash-name", + }, + Spec: logstashv1alpha1.LogstashSpec{ + PodTemplate: 
corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "label1": "value1", + "label2": "value2", + NameLabelName: "overridden-logstash-name", + }, + }, + }, + Version: "8.6.1", + }}, + assertions: func(pod corev1.PodTemplateSpec) { + labels := (&logstashv1alpha1.Logstash{ObjectMeta: metav1.ObjectMeta{Name: "logstash-name"}}).GetIdentityLabels() + labels[VersionLabelName] = "8.6.1" + labels["label1"] = "value1" + labels["label2"] = "value2" + labels[NameLabelName] = "overridden-logstash-name" + assert.Equal(t, labels, pod.Labels) + }, + }, + { + name: "with user-provided ENV variable", + logstash: logstashv1alpha1.Logstash{Spec: logstashv1alpha1.LogstashSpec{ + PodTemplate: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "logstash", + Env: []corev1.EnvVar{ + { + Name: "user-env", + Value: "user-env-value", + }, + }, + }, + }, + }, + }, + }}, + assertions: func(pod corev1.PodTemplateSpec) { + assert.Len(t, GetLogstashContainer(pod.Spec).Env, 1) + }, + }, + { + name: "with multiple services, readiness probe hits the correct port", + logstash: logstashv1alpha1.Logstash{ + Spec: logstashv1alpha1.LogstashSpec{ + Version: "8.6.1", + Services: []logstashv1alpha1.LogstashService{{ + Name: LogstashAPIServiceName, + Service: commonv1.ServiceTemplate{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Name: "api", Protocol: "TCP", Port: 9200}, + }, + }, + }}, { + Name: "notapi", + Service: commonv1.ServiceTemplate{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Name: "notapi", Protocol: "TCP", Port: 9600}, + }, + }, + }}, + }, + }, + }, + assertions: func(pod corev1.PodTemplateSpec) { + assert.Equal(t, 9200, GetLogstashContainer(pod.Spec).ReadinessProbe.HTTPGet.Port.IntValue()) + }, + }, + { + name: "with api service customized, readiness probe hits the correct port", + logstash: logstashv1alpha1.Logstash{ + Spec: logstashv1alpha1.LogstashSpec{ + Version: "8.6.1", + Services: []logstashv1alpha1.LogstashService{ + { + Name: LogstashAPIServiceName, + Service: commonv1.ServiceTemplate{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Name: "api", Protocol: "TCP", Port: 9200}, + }, + }, + }}, + }, + }}, + assertions: func(pod corev1.PodTemplateSpec) { + assert.Equal(t, 9200, GetLogstashContainer(pod.Spec).ReadinessProbe.HTTPGet.Port.IntValue()) + }, + }, + { + name: "with default service, readiness probe hits the correct port", + logstash: logstashv1alpha1.Logstash{ + Spec: logstashv1alpha1.LogstashSpec{ + Version: "8.6.1", + }}, + assertions: func(pod corev1.PodTemplateSpec) { + assert.Equal(t, 9600, GetLogstashContainer(pod.Spec).ReadinessProbe.HTTPGet.Port.IntValue()) + }, + }, + + { + name: "with custom annotation", + logstash: logstashv1alpha1.Logstash{Spec: logstashv1alpha1.LogstashSpec{ + Image: "my-custom-image:1.0.0", + Version: "8.6.1", + }}, + assertions: func(pod corev1.PodTemplateSpec) { + assert.Equal(t, "my-custom-image:1.0.0", GetLogstashContainer(pod.Spec).Image) + }, + }, + { + name: "with user-provided volumes and volume mounts", + logstash: logstashv1alpha1.Logstash{Spec: logstashv1alpha1.LogstashSpec{ + PodTemplate: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "logstash", + VolumeMounts: []corev1.VolumeMount{ + { + Name: "user-volume-mount", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "user-volume", + }, + }, + }, + }, + }}, + assertions: func(pod corev1.PodTemplateSpec) { + assert.Len(t, 
pod.Spec.Volumes, 4)
+				assert.Len(t, GetLogstashContainer(pod.Spec).VolumeMounts, 4)
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			params := Params{
+				Context:  context.Background(),
+				Client:   k8s.NewFakeClient(),
+				Logstash: tt.logstash,
+			}
+			configHash := fnv.New32a()
+			got, err := buildPodTemplate(params, configHash)
+
+			require.NoError(t, err)
+			tt.assertions(got)
+		})
+	}
+}
+
+// GetLogstashContainer returns the Logstash container from the given podSpec.
+func GetLogstashContainer(podSpec corev1.PodSpec) *corev1.Container {
+	return pod.ContainerByName(podSpec, logstashv1alpha1.LogstashContainerName)
+}
diff --git a/pkg/controller/logstash/reconcile.go b/pkg/controller/logstash/reconcile.go
new file mode 100644
index 0000000000..e4521a83a2
--- /dev/null
+++ b/pkg/controller/logstash/reconcile.go
@@ -0,0 +1,82 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License 2.0;
+// you may not use this file except in compliance with the Elastic License 2.0.
+
+package logstash
+
+import (
+	"context"
+	"reflect"
+
+	"github.com/elastic/cloud-on-k8s/v2/pkg/controller/logstash/sset"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/client-go/kubernetes/scheme"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+
+	"github.com/pkg/errors"
+
+	logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1"
+	"github.com/elastic/cloud-on-k8s/v2/pkg/controller/common"
+	"github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/reconciler"
+	"github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/tracing"
+	"github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s"
+)
+
+func reconcileStatefulSet(params Params, podTemplate corev1.PodTemplateSpec) (*reconciler.Results, logstashv1alpha1.LogstashStatus) {
+	defer tracing.Span(&params.Context)()
+	results := reconciler.NewResult(params.Context)
+
+	s := sset.New(sset.Params{
+		Name:                 logstashv1alpha1.Name(params.Logstash.Name),
+		Namespace:            params.Logstash.Namespace,
+		ServiceName:          logstashv1alpha1.APIServiceName(params.Logstash.Name),
+		Selector:             params.Logstash.GetIdentityLabels(),
+		Labels:               params.Logstash.GetIdentityLabels(),
+		PodTemplateSpec:      podTemplate,
+		Replicas:             params.Logstash.Spec.Count,
+		RevisionHistoryLimit: params.Logstash.Spec.RevisionHistoryLimit,
+	})
+	if err := controllerutil.SetControllerReference(&params.Logstash, &s, scheme.Scheme); err != nil {
+		return results.WithError(err), params.Status
+	}
+
+	reconciled, err := sset.Reconcile(params.Context, params.Client, s, &params.Logstash)
+	if err != nil {
+		return results.WithError(err), params.Status
+	}
+
+	var status logstashv1alpha1.LogstashStatus
+
+	if status, err = calculateStatus(&params, reconciled.Status.ReadyReplicas, reconciled.Status.Replicas); err != nil {
+		results.WithError(errors.Wrap(err, "while calculating status"))
+	}
+	return results, status
+}
+
+// calculateStatus will calculate a new status from the state of the pods within the k8s cluster
+// and will return any error encountered.
+func calculateStatus(params *Params, ready, desired int32) (logstashv1alpha1.LogstashStatus, error) {
+	logstash := params.Logstash
+	status := params.Status
+
+	pods, err := k8s.PodsMatchingLabels(params.Client, logstash.Namespace, map[string]string{NameLabelName: logstash.Name})
+	if err != nil {
+		return status, err
+	}
+
+	status.Version = common.LowestVersionFromPods(params.Context, status.Version, pods, VersionLabelName)
+	status.AvailableNodes = ready
+	status.ExpectedNodes = desired
+	return status, nil
+}
+
+// updateStatus will update the Elastic Logstash's status within the k8s cluster, using the given Elastic Logstash and status.
+func updateStatus(ctx context.Context, logstash logstashv1alpha1.Logstash, client client.Client, status logstashv1alpha1.LogstashStatus) error {
+	if reflect.DeepEqual(logstash.Status, status) {
+		return nil
+	}
+	logstash.Status = status
+	return common.UpdateStatus(ctx, client, &logstash)
+}
diff --git a/pkg/controller/logstash/service.go b/pkg/controller/logstash/service.go
new file mode 100644
index 0000000000..9f1bea3879
--- /dev/null
+++ b/pkg/controller/logstash/service.go
@@ -0,0 +1,97 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License 2.0;
+// you may not use this file except in compliance with the Elastic License 2.0.
+
+package logstash
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1"
+	"github.com/elastic/cloud-on-k8s/v2/pkg/controller/common"
+	"github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/defaults"
+	"github.com/elastic/cloud-on-k8s/v2/pkg/controller/logstash/network"
+)
+
+const (
+	LogstashAPIServiceName = "api"
+)
+
+// reconcileServices reconciles the Services defined in the spec.
+//
+// When a service is defined that matches the API service name, then that service is used to define
+// the service for the logstash API. If not, then a default service is created for the API service.
+func reconcileServices(params Params) ([]corev1.Service, error) {
+	createdAPIService := false
+
+	svcs := make([]corev1.Service, 0, len(params.Logstash.Spec.Services)+1)
+	for _, service := range params.Logstash.Spec.Services {
+		logstash := params.Logstash
+		if logstashv1alpha1.UserServiceName(logstash.Name, service.Name) == logstashv1alpha1.APIServiceName(logstash.Name) {
+			createdAPIService = true
+		}
+		svc := newService(service, params.Logstash)
+		if err := reconcileService(params, svc); err != nil {
+			return []corev1.Service{}, err
+		}
+		svcs = append(svcs, *svc)
+	}
+	if !createdAPIService {
+		svc := newAPIService(params.Logstash)
+		if err := reconcileService(params, svc); err != nil {
+			return []corev1.Service{}, err
+		}
+		svcs = append(svcs, *svc)
+	}
+
+	return svcs, nil
+}
+
+func reconcileService(params Params, service *corev1.Service) error {
+	_, err := common.ReconcileService(params.Context, params.Client, service, &params.Logstash)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func newService(service logstashv1alpha1.LogstashService, logstash logstashv1alpha1.Logstash) *corev1.Service {
+	svc := corev1.Service{
+		ObjectMeta: service.Service.ObjectMeta,
+		Spec:       service.Service.Spec,
+	}
+
+	svc.ObjectMeta.Namespace = logstash.Namespace
+	svc.ObjectMeta.Name = logstashv1alpha1.UserServiceName(logstash.Name, service.Name)
+
+	labels := NewLabels(logstash)
+
+	svc.Labels = labels
+
+	if svc.Spec.Selector == nil {
+		svc.Spec.Selector = labels
+	}
+
+	return &svc
+}
+
+func newAPIService(logstash logstashv1alpha1.Logstash) *corev1.Service {
+	svc := corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{},
+		Spec:       corev1.ServiceSpec{ClusterIP: "None"},
+	}
+
+	svc.ObjectMeta.Namespace = logstash.Namespace
+	svc.ObjectMeta.Name = logstashv1alpha1.APIServiceName(logstash.Name)
+
+	labels := NewLabels(logstash)
+	ports := []corev1.ServicePort{
+		{
+			Name:     LogstashAPIServiceName,
+			Protocol: corev1.ProtocolTCP,
+			Port:     network.HTTPPort,
+		},
+	}
+	return defaults.SetServiceDefaults(&svc, labels, labels, ports)
+}
\ No newline at end of file
diff --git a/pkg/controller/logstash/service_test.go b/pkg/controller/logstash/service_test.go
new file mode 100644
index 0000000000..90123b0dc5
--- /dev/null
+++ b/pkg/controller/logstash/service_test.go
@@ -0,0 +1,216 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License 2.0;
+// you may not use this file except in compliance with the Elastic License 2.0.
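+// For illustration only: user-provided service names are prefixed with the Logstash name, so for a
+// Logstash named "sample" a service entry named "api" resolves to "sample-ls-api". That matches the
+// default API service name and suppresses creation of the built-in API service (naming assumed from
+// the expectations asserted below).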
+ +package logstash + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/comparison" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" +) + +func TestReconcileServices(t *testing.T) { + trueVal := true + testCases := []struct { + name string + logstash logstashv1alpha1.Logstash + wantSvc []corev1.Service + }{ + { + name: "default service", + logstash: logstashv1alpha1.Logstash{ + ObjectMeta: metav1.ObjectMeta{ + Name: "logstash", + Namespace: "test", + }, + }, + wantSvc: []corev1.Service{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "logstash-ls-api", + Namespace: "test", + Labels: map[string]string{ + "common.k8s.elastic.co/type": "logstash", + "logstash.k8s.elastic.co/name": "logstash", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "logstash.k8s.elastic.co/v1alpha1", + Kind: "Logstash", + Name: "logstash", + Controller: &trueVal, + BlockOwnerDeletion: &trueVal, + }, + }, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "common.k8s.elastic.co/type": "logstash", + "logstash.k8s.elastic.co/name": "logstash", + }, + ClusterIP: "None", + Ports: []corev1.ServicePort{ + {Name: "api", Protocol: "TCP", Port: 9600}, + }, + }, + }}, + }, + { + name: "Changed port on default service", + logstash: logstashv1alpha1.Logstash{ + ObjectMeta: metav1.ObjectMeta{ + Name: "logstash", + Namespace: "test", + }, + Spec: logstashv1alpha1.LogstashSpec{ + Services: []logstashv1alpha1.LogstashService{{ + Name: LogstashAPIServiceName, + Service: commonv1.ServiceTemplate{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Name: LogstashAPIServiceName, Protocol: "TCP", Port: 9200}, + }, + }, + }, + }}, + }, + }, + wantSvc: []corev1.Service{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "logstash-ls-api", + Namespace: "test", + Labels: map[string]string{ + "common.k8s.elastic.co/type": "logstash", + "logstash.k8s.elastic.co/name": "logstash", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "logstash.k8s.elastic.co/v1alpha1", + Kind: "Logstash", + Name: "logstash", + Controller: &trueVal, + BlockOwnerDeletion: &trueVal, + }, + }, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "common.k8s.elastic.co/type": "logstash", + "logstash.k8s.elastic.co/name": "logstash", + }, + ClusterIP: "", + Ports: []corev1.ServicePort{ + {Name: "api", Protocol: "TCP", Port: 9200}, + }, + }, + }}, + }, + { + name: "Default service plus one", + logstash: logstashv1alpha1.Logstash{ + ObjectMeta: metav1.ObjectMeta{ + Name: "logstash", + Namespace: "test", + }, + Spec: logstashv1alpha1.LogstashSpec{ + Services: []logstashv1alpha1.LogstashService{{ + Name: "test", + Service: commonv1.ServiceTemplate{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Protocol: "TCP", Port: 9500}, + }, + }, + }, + }}, + }, + }, + wantSvc: []corev1.Service{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "logstash-ls-test", + Namespace: "test", + Labels: map[string]string{ + "common.k8s.elastic.co/type": "logstash", + "logstash.k8s.elastic.co/name": "logstash", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "logstash.k8s.elastic.co/v1alpha1", + Kind: "Logstash", + Name: "logstash", + Controller: &trueVal, + BlockOwnerDeletion: &trueVal, + }, + }, 
+ }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "common.k8s.elastic.co/type": "logstash", + "logstash.k8s.elastic.co/name": "logstash", + }, + Ports: []corev1.ServicePort{ + {Protocol: "TCP", Port: 9500}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "logstash-ls-api", + Namespace: "test", + Labels: map[string]string{ + "common.k8s.elastic.co/type": "logstash", + "logstash.k8s.elastic.co/name": "logstash", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "logstash.k8s.elastic.co/v1alpha1", + Kind: "Logstash", + Name: "logstash", + Controller: &trueVal, + BlockOwnerDeletion: &trueVal, + }, + }, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "common.k8s.elastic.co/type": "logstash", + "logstash.k8s.elastic.co/name": "logstash", + }, + ClusterIP: "None", + Ports: []corev1.ServicePort{ + {Name: "api", Protocol: "TCP", Port: 9600}, + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + client := k8s.NewFakeClient() + params := Params{ + Context: context.Background(), + Client: client, + Logstash: tc.logstash, + } + haveSvc, err := reconcileServices(params) + require.NoError(t, err) + require.Equal(t, len(tc.wantSvc), len(haveSvc)) + + for i := range tc.wantSvc { + comparison.AssertEqual(t, &tc.wantSvc[i], &haveSvc[i]) + } + }) + } +} diff --git a/pkg/controller/logstash/sset/sset.go b/pkg/controller/logstash/sset/sset.go new file mode 100644 index 0000000000..b76bac4793 --- /dev/null +++ b/pkg/controller/logstash/sset/sset.go @@ -0,0 +1,96 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package sset + +import ( + "context" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/hash" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/reconciler" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/maps" +) + +type Params struct { + Name string + Namespace string + ServiceName string + Selector map[string]string + Labels map[string]string + PodTemplateSpec corev1.PodTemplateSpec + Replicas int32 + RevisionHistoryLimit *int32 +} + +func New(params Params) appsv1.StatefulSet { + sset := appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: params.Name, + Namespace: params.Namespace, + Labels: params.Labels, + }, + Spec: appsv1.StatefulSetSpec{ + UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ + Type: appsv1.RollingUpdateStatefulSetStrategyType, + }, + // we don't care much about pods creation ordering, and manage deletion ordering ourselves, + // so we're fine with the StatefulSet controller spawning all pods in parallel + PodManagementPolicy: appsv1.ParallelPodManagement, + RevisionHistoryLimit: params.RevisionHistoryLimit, + // build a headless service per StatefulSet, matching the StatefulSet labels + ServiceName: params.ServiceName, + Selector: &metav1.LabelSelector{ + MatchLabels: params.Selector, + }, + + Replicas: ¶ms.Replicas, + Template: params.PodTemplateSpec, + }, + } + + // store a hash of the sset resource in its labels for comparison purposes + sset.Labels = hash.SetTemplateHashLabel(sset.Labels, sset.Spec) + + return sset +} + +// Reconcile creates or updates the expected StatefulSet. +func Reconcile(ctx context.Context, c k8s.Client, expected appsv1.StatefulSet, owner client.Object) (appsv1.StatefulSet, error) { + var reconciled appsv1.StatefulSet + + err := reconciler.ReconcileResource(reconciler.Params{ + Context: ctx, + Client: c, + Owner: owner, + Expected: &expected, + Reconciled: &reconciled, + NeedsUpdate: func() bool { + // expected labels or annotations not there + return !maps.IsSubset(expected.Labels, reconciled.Labels) || + !maps.IsSubset(expected.Annotations, reconciled.Annotations) || + // different spec + !EqualTemplateHashLabels(expected, reconciled) + }, + UpdateReconciled: func() { + // override annotations and labels with expected ones + // don't remove additional values in reconciled that may have been defaulted or + // manually set by the user on the existing resource + reconciled.Labels = maps.Merge(reconciled.Labels, expected.Labels) + reconciled.Annotations = maps.Merge(reconciled.Annotations, expected.Annotations) + reconciled.Spec = expected.Spec + }, + }) + return reconciled, err +} + +// EqualTemplateHashLabels reports whether actual and expected StatefulSets have the same template hash label value. +func EqualTemplateHashLabels(expected, actual appsv1.StatefulSet) bool { + return expected.Labels[hash.TemplateHashLabelName] == actual.Labels[hash.TemplateHashLabelName] +} diff --git a/pkg/controller/logstash/stackmon/beat_config.go b/pkg/controller/logstash/stackmon/beat_config.go new file mode 100644 index 0000000000..12e45eeb8d --- /dev/null +++ b/pkg/controller/logstash/stackmon/beat_config.go @@ -0,0 +1,61 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License 2.0;
+// you may not use this file except in compliance with the Elastic License 2.0.
+
+package stackmon
+
+import (
+	"context"
+	_ "embed" // for the beats config files
+
+	logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1"
+
+	"github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/reconciler"
+	"github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/stackmon/monitoring"
+	"github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s"
+)
+
+var (
+	// metricbeatConfigTemplate is a configuration template for Metricbeat to collect monitoring data about Logstash
+	//go:embed metricbeat.tpl.yml
+	metricbeatConfigTemplate string
+
+	// filebeatConfig is a static configuration for Filebeat to collect Logstash logs
+	//go:embed filebeat.yml
+	filebeatConfig string
+)
+
+// ReconcileConfigSecrets reconciles the secrets holding beats configuration
+func ReconcileConfigSecrets(ctx context.Context, client k8s.Client, logstash logstashv1alpha1.Logstash) error {
+	isMonitoringReconcilable, err := monitoring.IsReconcilable(&logstash)
+	if err != nil {
+		return err
+	}
+	if !isMonitoringReconcilable {
+		return nil
+	}
+
+	if monitoring.IsMetricsDefined(&logstash) {
+		b, err := Metricbeat(ctx, client, logstash)
+		if err != nil {
+			return err
+		}
+
+		if _, err := reconciler.ReconcileSecret(ctx, client, b.ConfigSecret, &logstash); err != nil {
+			return err
+		}
+	}
+
+	if monitoring.IsLogsDefined(&logstash) {
+		b, err := Filebeat(ctx, client, logstash)
+		if err != nil {
+			return err
+		}
+
+		if _, err := reconciler.ReconcileSecret(ctx, client, b.ConfigSecret, &logstash); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/pkg/controller/logstash/stackmon/filebeat.yml b/pkg/controller/logstash/stackmon/filebeat.yml
new file mode 100644
index 0000000000..314ef3c1f2
--- /dev/null
+++ b/pkg/controller/logstash/stackmon/filebeat.yml
@@ -0,0 +1,19 @@
+filebeat.modules:
+  - module: logstash
+    log:
+      enabled: true
+      var.paths:
+        - "/usr/share/logstash/logs/logstash-plain.log"
+        - "/usr/share/logstash/logs/logstash-json.log"
+        - "/usr/share/logstash/logs/logstash-deprecation.log"
+    slowlog:
+      enabled: true
+      var.paths:
+        - "/usr/share/logstash/logs/logstash-slowlog-plain.log"
+        - "/usr/share/logstash/logs/logstash-slowlog-json.log"
+
+processors:
+  - add_cloud_metadata: {}
+  - add_host_metadata: {}
+
+# Elasticsearch output configuration is generated
\ No newline at end of file
diff --git a/pkg/controller/logstash/stackmon/ls_config.go b/pkg/controller/logstash/stackmon/ls_config.go
new file mode 100644
index 0000000000..bd8f9c096e
--- /dev/null
+++ b/pkg/controller/logstash/stackmon/ls_config.go
@@ -0,0 +1,14 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License 2.0;
+// you may not use this file except in compliance with the Elastic License 2.0.
+ +package stackmon + +import ( + corev1 "k8s.io/api/core/v1" +) + +// fileLogStyleEnvVar returns the environment variable to configure the Logstash container to write logs to disk +func fileLogStyleEnvVar() corev1.EnvVar { + return corev1.EnvVar{Name: "LOG_STYLE", Value: "file"} +} diff --git a/pkg/controller/logstash/stackmon/metricbeat.tpl.yml b/pkg/controller/logstash/stackmon/metricbeat.tpl.yml new file mode 100644 index 0000000000..cbc84c40a9 --- /dev/null +++ b/pkg/controller/logstash/stackmon/metricbeat.tpl.yml @@ -0,0 +1,13 @@ +metricbeat.modules: + - module: logstash + metricsets: + - node + - node_stats + period: 10s + hosts: ["{{ .URL }}"] + xpack.enabled: true +processors: + - add_cloud_metadata: {} + - add_host_metadata: {} + +# Elasticsearch output configuration is generated diff --git a/pkg/controller/logstash/stackmon/sidecar.go b/pkg/controller/logstash/stackmon/sidecar.go new file mode 100644 index 0000000000..c178de44fe --- /dev/null +++ b/pkg/controller/logstash/stackmon/sidecar.go @@ -0,0 +1,110 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package stackmon + +import ( + "context" + "fmt" + "hash/fnv" + + corev1 "k8s.io/api/core/v1" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/defaults" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/stackmon" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/stackmon/monitoring" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/volume" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/logstash/network" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" +) + +const ( + // cfgHashAnnotation is used to store a hash of the Metricbeat and Filebeat configurations. + cfgHashAnnotation = "logstash.k8s.elastic.co/monitoring-config-hash" + + logstashLogsVolumeName = "logstash-logs" + logstashLogsMountPath = "/usr/share/logstash/logs" +) + +func Metricbeat(ctx context.Context, client k8s.Client, logstash logstashv1alpha1.Logstash) (stackmon.BeatSidecar, error) { + metricbeat, err := stackmon.NewMetricBeatSidecar( + ctx, + client, + commonv1.LogstashMonitoringAssociationType, + &logstash, + logstash.Spec.Version, + metricbeatConfigTemplate, + logstashv1alpha1.Namer, + fmt.Sprintf("%s://localhost:%d", "http" /*logstash.Spec.HTTP.Protocol()*/, network.HTTPPort), + //TODO: integrate username password with Logstash metrics API + "", /* no username for metrics API */ + "", /* no password for metrics API */ + false, + ) + if err != nil { + return stackmon.BeatSidecar{}, err + } + return metricbeat, nil +} + +func Filebeat(ctx context.Context, client k8s.Client, logstash logstashv1alpha1.Logstash) (stackmon.BeatSidecar, error) { + return stackmon.NewFileBeatSidecar(ctx, client, &logstash, logstash.Spec.Version, filebeatConfig, nil) +} + +// WithMonitoring updates the Logstash Pod template builder to deploy Metricbeat and Filebeat in sidecar containers +// in the Logstash pod and injects the volumes for the beat configurations and the ES CA certificates. 
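+// For illustration only: with both metrics and logs monitoring enabled, the resulting pod carries
+// the logstash container plus metricbeat and filebeat sidecars, a shared "logstash-logs" EmptyDir,
+// and the "logstash.k8s.elastic.co/monitoring-config-hash" annotation so pods are rotated when the
+// generated beat configuration changes.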
+func WithMonitoring(ctx context.Context, client k8s.Client, builder *defaults.PodTemplateBuilder, logstash logstashv1alpha1.Logstash) (*defaults.PodTemplateBuilder, error) { + isMonitoringReconcilable, err := monitoring.IsReconcilable(&logstash) + if err != nil { + return nil, err + } + if !isMonitoringReconcilable { + return builder, nil + } + + configHash := fnv.New32a() + var volumes []corev1.Volume + + if monitoring.IsMetricsDefined(&logstash) { + b, err := Metricbeat(ctx, client, logstash) + if err != nil { + return nil, err + } + + volumes = append(volumes, b.Volumes...) + builder.WithContainers(b.Container) + configHash.Write(b.ConfigHash.Sum(nil)) + } + + if monitoring.IsLogsDefined(&logstash) { + // Set environment variable to tell Logstash container to write logs to disk + builder.WithEnv(fileLogStyleEnvVar()) + + b, err := Filebeat(ctx, client, logstash) + if err != nil { + return nil, err + } + + // create a logs volume shared between Logstash and Filebeat + // TODO: revisit log volume when persistent storage is added + logsVolume := volume.NewEmptyDirVolume(logstashLogsVolumeName, logstashLogsMountPath) + volumes = append(volumes, logsVolume.Volume()) + filebeat := b.Container + filebeat.VolumeMounts = append(filebeat.VolumeMounts, logsVolume.VolumeMount()) + builder.WithVolumeMounts(logsVolume.VolumeMount()) + + volumes = append(volumes, b.Volumes...) + builder.WithContainers(filebeat) + configHash.Write(b.ConfigHash.Sum(nil)) + } + + // add the config hash annotation to ensure pod rotation when an ES password or a CA are rotated + builder.WithAnnotations(map[string]string{cfgHashAnnotation: fmt.Sprint(configHash.Sum32())}) + // inject all volumes + builder.WithVolumes(volumes...) + + return builder, nil +} diff --git a/pkg/controller/logstash/stackmon/sidecar_test.go b/pkg/controller/logstash/stackmon/sidecar_test.go new file mode 100644 index 0000000000..627486e106 --- /dev/null +++ b/pkg/controller/logstash/stackmon/sidecar_test.go @@ -0,0 +1,179 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package stackmon + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/defaults" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/stackmon/monitoring" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" +) + +func TestWithMonitoring(t *testing.T) { + sampleLs := logstashv1alpha1.Logstash{ + ObjectMeta: metav1.ObjectMeta{ + Name: "sample", + Namespace: "aerospace", + }, + Spec: logstashv1alpha1.LogstashSpec{ + Version: "8.6.0", + }, + } + monitoringEsRef := []commonv1.ObjectSelector{{Name: "monitoring", Namespace: "observability"}} + logsEsRef := []commonv1.ObjectSelector{{Name: "logs", Namespace: "observability"}} + + fakeMetricsBeatUserSecret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "sample-observability-monitoring-beat-es-mon-user", Namespace: "aerospace"}, + Data: map[string][]byte{"aerospace-sample-observability-monitoring-beat-es-mon-user": []byte("1234567890")}, + } + fakeLogsBeatUserSecret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "sample-observability-logs-beat-es-mon-user", Namespace: "aerospace"}, + Data: map[string][]byte{"aerospace-sample-observability-logs-beat-es-mon-user": []byte("1234567890")}, + } + fakeEsHTTPCertSecret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "sample-es-http-certs-public", Namespace: "aerospace"}, + Data: map[string][]byte{ + "tls.crt": []byte("7H1515N074r341C3r71F1C473"), + "ca.crt": []byte("7H1515N074r341C3r71F1C473"), + }, + } + fakeLsHTTPCertSecret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "sample-ls-http-certs-public", Namespace: "aerospace"}, + Data: map[string][]byte{ + "tls.crt": []byte("7H1515N074r341C3r71F1C473"), + "ca.crt": []byte("7H1515N074r341C3r71F1C473"), + }, + } + fakeClient := k8s.NewFakeClient(&fakeMetricsBeatUserSecret, &fakeLogsBeatUserSecret, &fakeEsHTTPCertSecret, &fakeLsHTTPCertSecret) + + monitoringAssocConf := commonv1.AssociationConf{ + AuthSecretName: "sample-observability-monitoring-beat-es-mon-user", + AuthSecretKey: "aerospace-sample-observability-monitoring-beat-es-mon-user", + CACertProvided: true, + CASecretName: "sample-es-monitoring-observability-monitoring-ca", + URL: "https://monitoring-es-http.observability.svc:9200", + Version: "8.6.0", + } + logsAssocConf := commonv1.AssociationConf{ + AuthSecretName: "sample-observability-logs-beat-es-mon-user", + AuthSecretKey: "aerospace-sample-observability-logs-beat-es-mon-user", + CACertProvided: true, + CASecretName: "sample-es-logs-observability-monitoring-ca", + URL: "https://logs-es-http.observability.svc:9200", + Version: "8.6.0", + } + + tests := []struct { + name string + ls func() logstashv1alpha1.Logstash + containersLength int + esEnvVarsLength int + podVolumesLength int + metricsVolumeMountsLength int + logVolumeMountsLength int + }{ + { + name: "without monitoring", + ls: func() logstashv1alpha1.Logstash { + return sampleLs + }, + containersLength: 1, + }, + { + name: "with metrics monitoring", + ls: func() logstashv1alpha1.Logstash { + sampleLs.Spec.Monitoring.Metrics.ElasticsearchRefs = monitoringEsRef + monitoring.GetMetricsAssociation(&sampleLs)[0].SetAssociationConf(&monitoringAssocConf) + return sampleLs + }, + containersLength: 2, + esEnvVarsLength: 0, + 
podVolumesLength: 2, + metricsVolumeMountsLength: 2, + }, + { + name: "with logs monitoring", + ls: func() logstashv1alpha1.Logstash { + sampleLs.Spec.Monitoring.Metrics.ElasticsearchRefs = nil + sampleLs.Spec.Monitoring.Logs.ElasticsearchRefs = monitoringEsRef + monitoring.GetLogsAssociation(&sampleLs)[0].SetAssociationConf(&monitoringAssocConf) + return sampleLs + }, + containersLength: 2, + esEnvVarsLength: 1, + podVolumesLength: 3, + logVolumeMountsLength: 3, + }, + { + name: "with metrics and logs monitoring", + ls: func() logstashv1alpha1.Logstash { + sampleLs.Spec.Monitoring.Metrics.ElasticsearchRefs = monitoringEsRef + monitoring.GetMetricsAssociation(&sampleLs)[0].SetAssociationConf(&monitoringAssocConf) + sampleLs.Spec.Monitoring.Logs.ElasticsearchRefs = monitoringEsRef + monitoring.GetLogsAssociation(&sampleLs)[0].SetAssociationConf(&logsAssocConf) + return sampleLs + }, + containersLength: 3, + esEnvVarsLength: 1, + podVolumesLength: 4, + metricsVolumeMountsLength: 2, + logVolumeMountsLength: 3, + }, + { + name: "with metrics and logs monitoring with different es ref", + ls: func() logstashv1alpha1.Logstash { + sampleLs.Spec.Monitoring.Metrics.ElasticsearchRefs = monitoringEsRef + monitoring.GetMetricsAssociation(&sampleLs)[0].SetAssociationConf(&monitoringAssocConf) + sampleLs.Spec.Monitoring.Logs.ElasticsearchRefs = logsEsRef + monitoring.GetLogsAssociation(&sampleLs)[0].SetAssociationConf(&logsAssocConf) + return sampleLs + }, + containersLength: 3, + esEnvVarsLength: 1, + podVolumesLength: 5, + metricsVolumeMountsLength: 2, + logVolumeMountsLength: 3, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ls := tc.ls() + builder := defaults.NewPodTemplateBuilder(corev1.PodTemplateSpec{}, logstashv1alpha1.LogstashContainerName) + _, err := WithMonitoring(context.Background(), fakeClient, builder, ls) + assert.NoError(t, err) + + assert.Equal(t, tc.containersLength, len(builder.PodTemplate.Spec.Containers)) + for _, v := range builder.PodTemplate.Spec.Volumes { + fmt.Println(v) + } + assert.Equal(t, tc.podVolumesLength, len(builder.PodTemplate.Spec.Volumes)) + + if monitoring.IsMetricsDefined(&ls) { + for _, c := range builder.PodTemplate.Spec.Containers { + if c.Name == "metricbeat" { + assert.Equal(t, tc.metricsVolumeMountsLength, len(c.VolumeMounts)) + } + } + } + if monitoring.IsLogsDefined(&ls) { + for _, c := range builder.PodTemplate.Spec.Containers { + if c.Name == "filebeat" { + assert.Equal(t, tc.logVolumeMountsLength, len(c.VolumeMounts)) + } + } + } + }) + } +} diff --git a/pkg/controller/logstash/volume.go b/pkg/controller/logstash/volume.go new file mode 100644 index 0000000000..9737380fdc --- /dev/null +++ b/pkg/controller/logstash/volume.go @@ -0,0 +1,95 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package logstash + +import ( + "fmt" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/volume" +) + +const ( + InitContainerConfigVolumeMountPath = "/mnt/elastic-internal/logstash-config-local" + + // InternalConfigVolumeName is a volume which contains the generated configuration. 
+ InternalConfigVolumeName = "elastic-internal-logstash-config" + InternalConfigVolumeMountPath = "/mnt/elastic-internal/logstash-config" + InternalPipelineVolumeName = "elastic-internal-logstash-pipeline" + InternalPipelineVolumeMountPath = "/mnt/elastic-internal/logstash-pipeline" +) + +var ( + // ConfigSharedVolume contains the Logstash config/ directory, it contains the contents of config from the docker container + ConfigSharedVolume = volume.SharedVolume{ + VolumeName: ConfigVolumeName, + InitContainerMountPath: InitContainerConfigVolumeMountPath, + ContainerMountPath: ConfigMountPath, + } +) + +// ConfigVolume returns a SecretVolume to hold the Logstash config of the given Logstash resource. +func ConfigVolume(ls logstashv1alpha1.Logstash) volume.SecretVolume { + return volume.NewSecretVolumeWithMountPath( + logstashv1alpha1.ConfigSecretName(ls.Name), + InternalConfigVolumeName, + InternalConfigVolumeMountPath, + ) +} + +// PipelineVolume returns a SecretVolume to hold the Logstash config of the given Logstash resource. +func PipelineVolume(ls logstashv1alpha1.Logstash) volume.SecretVolume { + return volume.NewSecretVolumeWithMountPath( + logstashv1alpha1.PipelineSecretName(ls.Name), + InternalPipelineVolumeName, + InternalPipelineVolumeMountPath, + ) +} + +func buildVolumes(params Params) ([]volume.VolumeLike, error) { + vols := []volume.VolumeLike{ConfigSharedVolume, ConfigVolume(params.Logstash), PipelineVolume(params.Logstash)} + + // all volumes with CAs of direct associations + caAssocVols, err := getVolumesFromAssociations(params.Logstash.GetAssociations()) + if err != nil { + return nil, err + } + + vols = append(vols, caAssocVols...) + + return vols, nil +} + +func getVolumesFromAssociations(associations []commonv1.Association) ([]volume.VolumeLike, error) { + var vols []volume.VolumeLike //nolint:prealloc + for i, assoc := range associations { + assocConf, err := assoc.AssociationConf() + if err != nil { + return nil, err + } + if !assocConf.CAIsConfigured() { + // skip as there is no volume to mount if association has no CA configured + continue + } + caSecretName := assocConf.GetCASecretName() + vols = append(vols, volume.NewSecretVolumeWithMountPath( + caSecretName, + fmt.Sprintf("%s-certs-%d", assoc.AssociationType(), i), + certificatesDir(assoc), + )) + } + return vols, nil +} + +func certificatesDir(association commonv1.Association) string { + ref := association.AssociationRef() + return fmt.Sprintf( + "/mnt/elastic-internal/%s-association/%s/%s/certs", + association.AssociationType(), + ref.Namespace, + ref.NameOrSecretName(), + ) +} diff --git a/pkg/controller/logstash/volume_test.go b/pkg/controller/logstash/volume_test.go new file mode 100644 index 0000000000..af5f705d92 --- /dev/null +++ b/pkg/controller/logstash/volume_test.go @@ -0,0 +1,89 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package logstash + +import ( + "testing" + + "github.com/stretchr/testify/require" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" +) + +func Test_getVolumesFromAssociations(t *testing.T) { + // Note: we use setAssocConfs to set the AssociationConfs which are normally set in the reconciliation loop. 
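+	// For illustration only: each association with a configured CA is mounted under a path of the form
+	// /mnt/elastic-internal/<association-type>-association/<namespace>/<name>/certs (see certificatesDir),
+	// while associations without a CA are skipped, which is what these cases assert.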
+ for _, tt := range []struct { + name string + params Params + setAssocConfs func(assocs []commonv1.Association) + wantAssociationsLength int + }{ + { + name: "es refs", + params: Params{ + Logstash: logstashv1alpha1.Logstash{ + Spec: logstashv1alpha1.LogstashSpec{ + ElasticsearchRefs: []logstashv1alpha1.ElasticsearchCluster{ + { + ObjectSelector: commonv1.ObjectSelector{Name: "elasticsearch"}, + ClusterName: "production", + }, + { + ObjectSelector: commonv1.ObjectSelector{Name: "elasticsearch2"}, + ClusterName: "production2", + }, + }, + }, + }, + }, + setAssocConfs: func(assocs []commonv1.Association) { + assocs[0].SetAssociationConf(&commonv1.AssociationConf{ + CASecretName: "elasticsearch-es-ca", + }) + assocs[1].SetAssociationConf(&commonv1.AssociationConf{ + CASecretName: "elasticsearch2-es-ca", + }) + }, + wantAssociationsLength: 2, + }, + { + name: "one es ref with ca, another no ca", + params: Params{ + Logstash: logstashv1alpha1.Logstash{ + Spec: logstashv1alpha1.LogstashSpec{ + ElasticsearchRefs: []logstashv1alpha1.ElasticsearchCluster{ + { + ObjectSelector: commonv1.ObjectSelector{Name: "uat"}, + ClusterName: "uat", + }, + { + ObjectSelector: commonv1.ObjectSelector{Name: "production"}, + ClusterName: "production", + }, + }, + }, + }, + }, + setAssocConfs: func(assocs []commonv1.Association) { + assocs[0].SetAssociationConf(&commonv1.AssociationConf{ + // No CASecretName + }) + assocs[1].SetAssociationConf(&commonv1.AssociationConf{ + CASecretName: "production-es-ca", + }) + }, + wantAssociationsLength: 1, + }, + } { + t.Run(tt.name, func(t *testing.T) { + assocs := tt.params.Logstash.GetAssociations() + tt.setAssocConfs(assocs) + associations, err := getVolumesFromAssociations(assocs) + require.NoError(t, err) + require.Equal(t, tt.wantAssociationsLength, len(associations)) + }) + } +} diff --git a/pkg/controller/maps/controller.go b/pkg/controller/maps/controller.go index 4427a340c3..07ab717d88 100644 --- a/pkg/controller/maps/controller.go +++ b/pkg/controller/maps/controller.go @@ -233,7 +233,7 @@ func (r *ReconcileMapsServer) doReconcile(ctx context.Context, ems emsv1alpha1.E }.ReconcileCAAndHTTPCerts(ctx) if results.HasError() { _, err := results.Aggregate() - k8s.EmitErrorEvent(r.recorder, err, &ems, events.EventReconciliationError, "Certificate reconciliation error: %v", err) + k8s.MaybeEmitErrorEvent(r.recorder, err, &ems, events.EventReconciliationError, "Certificate reconciliation error: %v", err) return results, status } @@ -287,7 +287,7 @@ func (r *ReconcileMapsServer) validate(ctx context.Context, ems emsv1alpha1.Elas if err := ems.ValidateCreate(); err != nil { ulog.FromContext(ctx).Error(err, "Validation failed") - k8s.EmitErrorEvent(r.recorder, err, &ems, events.EventReasonValidation, err.Error()) + k8s.MaybeEmitErrorEvent(r.recorder, err, &ems, events.EventReasonValidation, err.Error()) return tracing.CaptureError(vctx, err) } diff --git a/pkg/controller/stackconfigpolicy/controller.go b/pkg/controller/stackconfigpolicy/controller.go index 35024a03d0..1fd4ca30e8 100644 --- a/pkg/controller/stackconfigpolicy/controller.go +++ b/pkg/controller/stackconfigpolicy/controller.go @@ -356,7 +356,7 @@ func (r *ReconcileStackConfigPolicy) validate(ctx context.Context, policy *polic if err := policy.ValidateCreate(); err != nil { ulog.FromContext(ctx).Error(err, "Validation failed") - k8s.EmitErrorEvent(r.recorder, err, policy, events.EventReasonValidation, err.Error()) + k8s.MaybeEmitErrorEvent(r.recorder, err, policy, events.EventReasonValidation, err.Error()) return 
tracing.CaptureError(vctx, err) } diff --git a/pkg/telemetry/fixtures.go b/pkg/telemetry/fixtures.go index 5ab7b93144..0b817787c3 100644 --- a/pkg/telemetry/fixtures.go +++ b/pkg/telemetry/fixtures.go @@ -89,6 +89,14 @@ const expectedTelemetryTemplate = `eck: helm_resource_count: 0 pod_count: 0 resource_count: 1 + logstashes: + pipeline_count: 0 + pipeline_ref_count: 0 + pod_count: 0 + resource_count: 0 + service_count: 0 + stack_monitoring_logs_count: 0 + stack_monitoring_metrics_count: 0 maps: pod_count: 0 resource_count: 0 diff --git a/pkg/telemetry/telemetry.go b/pkg/telemetry/telemetry.go index 932fa9b439..70f726518d 100644 --- a/pkg/telemetry/telemetry.go +++ b/pkg/telemetry/telemetry.go @@ -24,6 +24,7 @@ import ( esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1" entv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/enterprisesearch/v1" kbv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/kibana/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" mapsv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/maps/v1alpha1" policyv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/stackconfigpolicy/v1alpha1" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/reconciler" @@ -40,8 +41,7 @@ const ( resourceCount = "resource_count" podCount = "pod_count" helmManagedResourceCount = "helm_resource_count" - - timestampFieldName = "timestamp" + timestampFieldName = "timestamp" ) type ECKTelemetry struct { @@ -123,6 +123,7 @@ func (r *Reporter) getResourceStats(ctx context.Context) (map[string]interface{} agentStats, mapsStats, scpStats, + logstashStats, } { key, statsPart, err := f(r.client, r.managedNamespaces) if err != nil { @@ -398,6 +399,43 @@ func agentStats(k8sClient k8s.Client, managedNamespaces []string) (string, inter return "agents", stats, nil } +func logstashStats(k8sClient k8s.Client, managedNamespaces []string) (string, interface{}, error) { + const ( + pipelineCount = "pipeline_count" + pipelineRefCount = "pipeline_ref_count" + serviceCount = "service_count" + stackMonitoringLogsCount = "stack_monitoring_logs_count" + stackMonitoringMetricsCount = "stack_monitoring_metrics_count" + ) + stats := map[string]int32{resourceCount: 0, podCount: 0, stackMonitoringLogsCount: 0, + stackMonitoringMetricsCount: 0, serviceCount: 0, pipelineCount: 0, pipelineRefCount: 0} + + var logstashList logstashv1alpha1.LogstashList + for _, ns := range managedNamespaces { + if err := k8sClient.List(context.Background(), &logstashList, client.InNamespace(ns)); err != nil { + return "", nil, err + } + + for _, ls := range logstashList.Items { + ls := ls + stats[resourceCount]++ + stats[serviceCount] += int32(len(ls.Spec.Services)) + stats[podCount] += ls.Status.AvailableNodes + stats[pipelineCount] += int32(len(ls.Spec.Pipelines)) + if ls.Spec.PipelinesRef != nil { + stats[pipelineRefCount]++ + } + if monitoring.IsLogsDefined(&ls) { + stats[stackMonitoringLogsCount]++ + } + if monitoring.IsMetricsDefined(&ls) { + stats[stackMonitoringMetricsCount]++ + } + } + } + return "logstashes", stats, nil +} + func mapsStats(k8sClient k8s.Client, managedNamespaces []string) (string, interface{}, error) { stats := map[string]int32{resourceCount: 0, podCount: 0} diff --git a/pkg/telemetry/telemetry_test.go b/pkg/telemetry/telemetry_test.go index 7d651326f8..7616b042fd 100644 --- a/pkg/telemetry/telemetry_test.go +++ b/pkg/telemetry/telemetry_test.go @@ -24,6 +24,7 @@ import ( esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1" entv1 
"github.com/elastic/cloud-on-k8s/v2/pkg/apis/enterprisesearch/v1" kbv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/kibana/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" mapsv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/maps/v1alpha1" policyv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/stackconfigpolicy/v1alpha1" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/kibana" @@ -227,6 +228,69 @@ func TestNewReporter(t *testing.T) { }, }, }, + &logstashv1alpha1.Logstash{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + }, + Spec: logstashv1alpha1.LogstashSpec{ + Count: 3, + Monitoring: commonv1.Monitoring{ + Logs: commonv1.LogsMonitoring{ElasticsearchRefs: []commonv1.ObjectSelector{{Name: "monitoring"}}}, + Metrics: commonv1.MetricsMonitoring{ElasticsearchRefs: []commonv1.ObjectSelector{{Name: "monitoring"}}}, + }, + Pipelines: []commonv1.Config{ + {Data: map[string]interface{}{"pipeline.id": "main"}}, + }, + Services: []logstashv1alpha1.LogstashService{ + { + Name: "test1", + Service: commonv1.ServiceTemplate{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Port: 9200}, + }, + }, + }, + }, + { + Name: "test2", + Service: commonv1.ServiceTemplate{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Port: 9201}, + }, + }, + }, + }, + }, + }, + Status: logstashv1alpha1.LogstashStatus{ + AvailableNodes: 3, + }, + }, + &logstashv1alpha1.Logstash{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns2", + }, + Spec: logstashv1alpha1.LogstashSpec{ + Count: 1, + Services: []logstashv1alpha1.LogstashService{ + { + Name: "test1", + Service: commonv1.ServiceTemplate{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Port: 9200}, + }, + }, + }, + }, + }, + }, + Status: logstashv1alpha1.LogstashStatus{ + AvailableNodes: 1, + }, + }, &beatv1beta1.Beat{ ObjectMeta: metav1.ObjectMeta{ Name: "beat1", @@ -417,6 +481,14 @@ func TestNewReporter(t *testing.T) { helm_resource_count: 1 pod_count: 0 resource_count: 3 + logstashes: + pipeline_count: 1 + pipeline_ref_count: 0 + pod_count: 4 + resource_count: 2 + service_count: 3 + stack_monitoring_logs_count: 1 + stack_monitoring_metrics_count: 1 maps: pod_count: 1 resource_count: 1 diff --git a/pkg/utils/k8s/k8sutils.go b/pkg/utils/k8s/k8sutils.go index bd79cb5b86..b69f0e7ad7 100644 --- a/pkg/utils/k8s/k8sutils.go +++ b/pkg/utils/k8s/k8sutils.go @@ -143,8 +143,8 @@ func GetServiceIPAddresses(svc corev1.Service) []net.IP { return ipAddrs } -// EmitErrorEvent emits an event if the error is report-worthy -func EmitErrorEvent(r record.EventRecorder, err error, obj runtime.Object, reason, message string, args ...interface{}) { +// MaybeEmitErrorEvent emits an event if the error is report-worthy +func MaybeEmitErrorEvent(r record.EventRecorder, err error, obj runtime.Object, reason, message string, args ...interface{}) { // ignore nil errors and conflict issues if err == nil || apierrors.IsConflict(err) { return diff --git a/test/e2e/beat/config_test.go b/test/e2e/beat/config_test.go index 8509bcd27e..352f169fce 100644 --- a/test/e2e/beat/config_test.go +++ b/test/e2e/beat/config_test.go @@ -66,7 +66,7 @@ func TestMetricbeatDefaultConfig(t *testing.T) { } { t.Run(tt.name, func(t *testing.T) { // only execute this test on supported versions when stack monitoring is enabled - err := validations.IsSupportedVersion(test.Ctx().ElasticStackVersion) + err := validations.IsSupportedVersion(test.Ctx().ElasticStackVersion, validations.MinStackVersion) if tt.withStackMonitoring && err != nil 
{ t.SkipNow() } diff --git a/test/e2e/es/stack_monitoring_test.go b/test/e2e/es/stack_monitoring_test.go index 7c83fddc1a..0fd6e5ae74 100644 --- a/test/e2e/es/stack_monitoring_test.go +++ b/test/e2e/es/stack_monitoring_test.go @@ -37,7 +37,7 @@ const nodePort = int32(32767) // correctly delivered to the referenced monitoring Elasticsearch clusters. func TestESStackMonitoring(t *testing.T) { // only execute this test on supported version - err := validations.IsSupportedVersion(test.Ctx().ElasticStackVersion) + err := validations.IsSupportedVersion(test.Ctx().ElasticStackVersion, validations.MinStackVersion) if err != nil { t.SkipNow() } @@ -68,7 +68,7 @@ func TestExternalESStackMonitoring(t *testing.T) { t.SkipNow() } // only execute this test on supported version - err := validations.IsSupportedVersion(test.Ctx().ElasticStackVersion) + err := validations.IsSupportedVersion(test.Ctx().ElasticStackVersion, validations.MinStackVersion) if err != nil { t.SkipNow() } diff --git a/test/e2e/kb/stack_monitoring_test.go b/test/e2e/kb/stack_monitoring_test.go index 967e34b62a..bdafacc60b 100644 --- a/test/e2e/kb/stack_monitoring_test.go +++ b/test/e2e/kb/stack_monitoring_test.go @@ -20,7 +20,7 @@ import ( // correctly delivered to the referenced monitoring Elasticsearch clusters. func TestKBStackMonitoring(t *testing.T) { // only execute this test on supported version - err := validations.IsSupportedVersion(test.Ctx().ElasticStackVersion) + err := validations.IsSupportedVersion(test.Ctx().ElasticStackVersion, validations.MinStackVersion) if err != nil { t.SkipNow() } diff --git a/test/e2e/logstash/es_output_test.go b/test/e2e/logstash/es_output_test.go new file mode 100644 index 0000000000..ef23616da6 --- /dev/null +++ b/test/e2e/logstash/es_output_test.go @@ -0,0 +1,80 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +//go:build logstash || e2e + +package logstash + +import ( + "fmt" + "strconv" + "testing" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/test" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/elasticsearch" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/logstash" +) + +// TestLogstashEsOutput Logstash ingest events to Elasticsearch. Metrics should have `events.out` > 0. +func TestLogstashEsOutput(t *testing.T) { + + es := elasticsearch.NewBuilderWithoutSuffix("test-es"). + WithESMasterDataNodes(2, elasticsearch.DefaultResources) + + b := logstash.NewBuilder("test-ls-es-out"). + WithNodeCount(1). + WithPipelines([]commonv1.Config{ + { + Data: map[string]interface{}{ + "pipeline.id": "main", + "config.string": ` +input { exec { command => 'uptime' interval => 10 } } +output { + elasticsearch { + hosts => [ "${PRODUCTION_ES_HOSTS}" ] + ssl => true + cacert => "${PRODUCTION_ES_SSL_CERTIFICATE_AUTHORITY}" + user => "${PRODUCTION_ES_USER}" + password => "${PRODUCTION_ES_PASSWORD}" + } +} +`, + }, + }, + }). 
+ WithElasticsearchRefs( + logstashv1alpha1.ElasticsearchCluster{ + ObjectSelector: es.Ref(), + ClusterName: "production", + }, + ) + + steps := test.StepsFunc(func(k *test.K8sClient) test.StepList { + return test.StepList{ + b.CheckMetricsRequest(k, + logstash.Request{ + Name: "stats events", + Path: "/_node/stats/events", + }, + logstash.Want{ + MatchFunc: map[string]func(string) bool{ + // number of events goes out should be > 0 + "events.out": func(cntStr string) bool { + cnt, err := strconv.Atoi(cntStr) + if err != nil { + fmt.Printf("failed to convert string %s to int", cntStr) + return false + } + + return cnt > 0 + }, + }, + }), + } + }) + + test.Sequence(nil, steps, es, b).RunSequential(t) +} diff --git a/test/e2e/logstash/logstash_test.go b/test/e2e/logstash/logstash_test.go new file mode 100644 index 0000000000..55b1276b5a --- /dev/null +++ b/test/e2e/logstash/logstash_test.go @@ -0,0 +1,125 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +//go:build logstash || e2e + +package logstash + +import ( + "testing" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/test" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/logstash" + corev1 "k8s.io/api/core/v1" +) + +func TestSingleLogstash(t *testing.T) { + name := "test-single-logstash" + logstashBuilder := logstash.NewBuilder(name). + WithNodeCount(1) + test.Sequence(nil, test.EmptySteps, logstashBuilder).RunSequential(t) +} + +func TestLogstashWithCustomService(t *testing.T) { + name := "test-multiple-custom-logstash" + service := logstashv1alpha1.LogstashService{ + Name: "test", + Service: commonv1.ServiceTemplate{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Port: 9200}, + }, + }, + }, + } + logstashBuilder := (logstash.NewBuilder(name). + WithNodeCount(1). + WithServices(service)) + + test.Sequence(nil, test.EmptySteps, logstashBuilder).RunSequential(t) +} + +// This test sets a custom port for the Logstash API service +func TestLogstashWithReworkedApiService(t *testing.T) { + name := "test-multiple-custom-logstash" + service := logstashv1alpha1.LogstashService{ + Name: "api", + Service: commonv1.ServiceTemplate{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Port: 9200}, + }, + }, + }, + } + logstashBuilder := (logstash.NewBuilder(name). + WithNodeCount(1). + // Change the Logstash API service port + WithConfig(map[string]interface{}{ + "api.http.port": 9200, + }). + WithServices(service)) + + test.Sequence(nil, test.EmptySteps, logstashBuilder).RunSequential(t) +} + +// This test adds a new service, and changes the port that the logstash API is served from +func TestLogstashWithCustomServiceAndAmendedApi(t *testing.T) { + name := "test-multiple-custom-logstash" + customService := logstashv1alpha1.LogstashService{ + Name: "test", + Service: commonv1.ServiceTemplate{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Port: 9200}, + }, + }, + }, + } + + apiService := logstashv1alpha1.LogstashService{ + Name: "api", + Service: commonv1.ServiceTemplate{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Port: 9601}, + }, + }, + }, + } + + logstashBuilder := (logstash.NewBuilder(name). + WithNodeCount(1). 
+ // Change the Logstash API service port + WithConfig(map[string]interface{}{ + "api.http.port": 9601, + }). + WithServices(apiService, customService)) + + test.Sequence(nil, test.EmptySteps, logstashBuilder).RunSequential(t) +} + +func TestMultipleLogstashes(t *testing.T) { + name := "test-multiple-logstashes" + logstashBuilder := logstash.NewBuilder(name). + WithNodeCount(3) + test.Sequence(nil, test.EmptySteps, logstashBuilder).RunSequential(t) +} + +func TestLogstashServerVersionUpgradeToLatest8x(t *testing.T) { + srcVersion, dstVersion := test.GetUpgradePathTo8x(test.Ctx().ElasticStackVersion) + + name := "test-ls-version-upgrade-8x" + + logstash := logstash.NewBuilder(name). + WithNodeCount(2). + WithVersion(srcVersion). + WithRestrictedSecurityContext() + + logstashUpgraded := logstash.WithVersion(dstVersion).WithMutatedFrom(&logstash) + + test.RunMutations(t, []test.Builder{logstash}, []test.Builder{logstashUpgraded}) +} diff --git a/test/e2e/logstash/pipeline_test.go b/test/e2e/logstash/pipeline_test.go new file mode 100644 index 0000000000..5a9c20d737 --- /dev/null +++ b/test/e2e/logstash/pipeline_test.go @@ -0,0 +1,224 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +//go:build logstash || e2e + +package logstash + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/test" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/logstash" +) + +// TestPipelineConfigRefLogstash PipelineRef should be able to take pipelines.yaml from Secret. +func TestPipelineConfigRefLogstash(t *testing.T) { + secretName := "ls-generator-pipeline" + + pipelineSecret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: test.Ctx().ManagedNamespace(0), + }, + StringData: map[string]string{ + "pipelines.yml": ` +- pipeline.id: generator + pipeline.workers: 1 + queue.drain: false + config.string: input { generator {} } filter { sleep { time => 10 } } output { stdout { codec => dots } } +- pipeline.id: main + config.string: input { stdin{} } output { stdout{} }`, + }, + } + + before := test.StepsFunc(func(k *test.K8sClient) test.StepList { + return test.StepList{}.WithStep(test.Step{ + Name: "Create pipeline secret", + Test: test.Eventually(func() error { + return k.CreateOrUpdateSecrets(pipelineSecret) + }), + }) + }) + + name := "test-pipeline-ref" + b := logstash.NewBuilder(name). + WithNodeCount(1). + WithPipelinesConfigRef(commonv1.ConfigSource{ + SecretRef: commonv1.SecretRef{ + SecretName: secretName, + }, + }) + + steps := test.StepsFunc(func(k *test.K8sClient) test.StepList { + return test.StepList{ + b.CheckMetricsRequest(k, + logstash.Request{ + Name: "pipeline [generator]", + Path: "/_node/pipelines/generator", + }, + logstash.Want{ + Match: map[string]string{ + "pipelines.generator.workers": "1", + "status": "green", + }, + }), + test.Step{ + Name: "Delete pipeline secret", + Test: test.Eventually(func() error { + return k.DeleteSecrets(pipelineSecret) + }), + }, + } + }) + + test.Sequence(before, steps, b).RunSequential(t) +} + +// TestPipelineConfigLogstash Pipeline should be able to pass to Logstash via VolumeMount. 
+func TestPipelineConfigLogstash(t *testing.T) { + secretName := "ls-split-pipe" + + pipelineSecret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: test.Ctx().ManagedNamespace(0), + }, + StringData: map[string]string{ + "split.conf": "input { exec { command => \"uptime\" interval => 10 } } output { stdout{} }", + }, + } + + before := test.StepsFunc(func(k *test.K8sClient) test.StepList { + return test.StepList{}.WithStep(test.Step{ + Name: "Create pipeline secret", + Test: test.Eventually(func() error { + return k.CreateOrUpdateSecrets(pipelineSecret) + }), + }) + }) + + name := "test-split-pipeline" + volName := "ls-pipe-vol" + mountPath := "/usr/share/logstash/pipeline" + + b := logstash.NewBuilder(name). + WithNodeCount(1). + WithPipelines([]commonv1.Config{ + { + Data: map[string]interface{}{ + "pipeline.id": "split", + "path.config": mountPath, + }, + }, + { + Data: map[string]interface{}{ + "pipeline.id": "main", + "config.string": "input { stdin{} } output { stdout{} }", + }, + }, + }). + WithVolumes(corev1.Volume{ + Name: volName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secretName, + }, + }, + }). + WithVolumeMounts(corev1.VolumeMount{ + Name: volName, + MountPath: mountPath, + }) + + steps := test.StepsFunc(func(k *test.K8sClient) test.StepList { + return test.StepList{ + b.CheckMetricsRequest(k, + logstash.Request{ + Name: "pipeline [split]", + Path: "/_node/pipelines/split", + }, + logstash.Want{ + Match: map[string]string{ + "pipelines.split.batch_size": "125", + "status": "green", + }, + }), + test.Step{ + Name: "Delete pipeline secret", + Test: test.Eventually(func() error { + return k.DeleteSecrets(pipelineSecret) + }), + }, + } + }) + + test.Sequence(before, steps, b).RunSequential(t) +} + +// Verify that pipelines will reload when the Pipeline definition changes. +func TestLogstashPipelineReload(t *testing.T) { + name := "test-ls-reload" + + logstashFirstPipeline := logstash.NewBuilder(name).WithNodeCount(1). + WithPipelines([]commonv1.Config{ + { + Data: map[string]interface{}{ + "pipeline.id": "main", + "pipeline.workers": 1, + "config.string": "input { beats{ port => 5044}} output { stdout{} }", + }, + }, + }) + + logstashSecondPipeline := logstash.Builder{Logstash: *logstashFirstPipeline.Logstash.DeepCopy()}. + WithPipelines([]commonv1.Config{ + { + Data: map[string]interface{}{ + "pipeline.id": "main", + "pipeline.workers": 2, + "config.string": "input { beats{ port => 5044} } output { stdout{} }", + }, + }, + }). + WithMutatedFrom(&logstashFirstPipeline) + + stepsFn := func(k *test.K8sClient) test.StepList { + return test.StepList{}. + WithSteps(logstashFirstPipeline.CheckK8sTestSteps(k)). + WithStep( + logstashFirstPipeline.CheckMetricsRequest(k, + logstash.Request{ + Name: "pipeline [main]", + Path: "/_node/pipelines/main", + }, + logstash.Want{ + Match: map[string]string{ + "pipelines.main.workers": "1", + "status": "green", + }, + }), + ). + WithSteps(logstashSecondPipeline.MutationTestSteps(k)). 
+ WithStep( + logstashSecondPipeline.CheckMetricsRequest(k, + logstash.Request{ + Name: "pipeline [main]", + Path: "/_node/pipelines/main", + }, + logstash.Want{ + Match: map[string]string{ + "pipelines.main.workers": "2", + "status": "green", + }, + }), + ) + } + + test.Sequence(nil, stepsFn, logstashFirstPipeline).RunSequential(t) +} diff --git a/test/e2e/logstash/stack_monitoring_test.go b/test/e2e/logstash/stack_monitoring_test.go new file mode 100644 index 0000000000..56c9a26b9b --- /dev/null +++ b/test/e2e/logstash/stack_monitoring_test.go @@ -0,0 +1,44 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +//go:build logstash || e2e + +package logstash + +import ( + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/version" + "testing" + + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/test" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/checks" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/elasticsearch" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/logstash" +) + +// TestLogstashStackMonitoring tests that when Logstash is configured with monitoring, its log and metrics are +// correctly delivered to the referenced monitoring Elasticsearch clusters. +func TestLogstashStackMonitoring(t *testing.T) { + // only execute this test on supported version + if version.MustParse(test.Ctx().ElasticStackVersion).LT(logstashv1alpha1.MinStackMonVersion) { + t.SkipNow() + } + + // create 1 monitored and 2 monitoring clusters to collect separately metrics and logs + metrics := elasticsearch.NewBuilder("test-ls-mon-metrics"). + WithESMasterDataNodes(2, elasticsearch.DefaultResources) + logs := elasticsearch.NewBuilder("test-ls-mon-logs"). + WithESMasterDataNodes(2, elasticsearch.DefaultResources) + monitored := logstash.NewBuilder("test-ls-mon-a"). + WithNodeCount(1). + WithMetricsMonitoring(metrics.Ref()). + WithLogsMonitoring(logs.Ref()) + + // checks that the sidecar beats have sent data in the monitoring clusters + steps := func(k *test.K8sClient) test.StepList { + return checks.MonitoredSteps(&monitored, k) + } + + test.Sequence(nil, steps, metrics, logs, monitored).RunSequential(t) +} diff --git a/test/e2e/samples_test.go b/test/e2e/samples_test.go index 127c50feea..402ab61082 100644 --- a/test/e2e/samples_test.go +++ b/test/e2e/samples_test.go @@ -17,6 +17,7 @@ import ( "k8s.io/apimachinery/pkg/util/rand" commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" "github.com/elastic/cloud-on-k8s/v2/test/e2e/cmd/run" "github.com/elastic/cloud-on-k8s/v2/test/e2e/test" "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/apmserver" @@ -24,6 +25,7 @@ import ( "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/enterprisesearch" "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/helper" "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/kibana" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/logstash" ) func TestSamples(t *testing.T) { @@ -90,6 +92,31 @@ func createBuilders(t *testing.T, decoder *helper.YAMLDecoder, sampleFile, testN WithRestrictedSecurityContext(). WithLabel(run.TestNameLabel, fullTestName). 
WithPodLabel(run.TestNameLabel, fullTestName) + case logstash.Builder: + esRefs := make([]logstashv1alpha1.ElasticsearchCluster, 0, len(b.Logstash.Spec.ElasticsearchRefs)) + for _, ref := range b.Logstash.Spec.ElasticsearchRefs { + esRefs = append(esRefs, logstashv1alpha1.ElasticsearchCluster{ + ObjectSelector: tweakServiceRef(ref.ObjectSelector, suffix), + ClusterName: ref.ClusterName, + }) + } + metricsRefs := make([]commonv1.ObjectSelector, 0, len(b.Logstash.Spec.Monitoring.Metrics.ElasticsearchRefs)) + for _, ref := range b.Logstash.Spec.Monitoring.Metrics.ElasticsearchRefs { + metricsRefs = append(metricsRefs, tweakServiceRef(ref, suffix)) + } + logRefs := make([]commonv1.ObjectSelector, 0, len(b.Logstash.Spec.Monitoring.Logs.ElasticsearchRefs)) + for _, ref := range b.Logstash.Spec.Monitoring.Logs.ElasticsearchRefs { + logRefs = append(logRefs, tweakServiceRef(ref, suffix)) + } + + return b.WithNamespace(namespace). + WithSuffix(suffix). + WithElasticsearchRefs(esRefs...). + WithMetricsMonitoring(metricsRefs...). + WithLogsMonitoring(logRefs...). + WithRestrictedSecurityContext(). + WithLabel(run.TestNameLabel, fullTestName). + WithPodLabel(run.TestNameLabel, fullTestName) default: return b } diff --git a/test/e2e/stack_test.go b/test/e2e/stack_test.go index c2e739b0c4..620d1655e4 100644 --- a/test/e2e/stack_test.go +++ b/test/e2e/stack_test.go @@ -19,6 +19,7 @@ import ( esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1" entv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/enterprisesearch/v1" kbv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/kibana/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/beat/filebeat" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/version" "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" @@ -28,6 +29,7 @@ import ( "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/beat" "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/elasticsearch" "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/enterprisesearch" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/logstash" "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/kibana" ) @@ -72,6 +74,9 @@ func TestVersionUpgradeOrdering(t *testing.T) { WithElasticsearchRef(esRef). WithRestrictedSecurityContext() entUpdated := ent.WithVersion(updatedVersion) + logstash := logstash.NewBuilder("ls"). + WithVersion(initialVersion) // pre 8.x doesn't require any config, but we change the version after calling + logstashUpdated := logstash.WithVersion(updatedVersion) fb := beat.NewBuilder("fb"). WithType(filebeat.Type). WithRoles(beat.AutodiscoverClusterRoleName). 
@@ -81,8 +86,8 @@ func TestVersionUpgradeOrdering(t *testing.T) { fb = beat.ApplyYamls(t, fb, beattests.E2EFilebeatConfig, beattests.E2EFilebeatPodTemplate) fbUpdated := fb.WithVersion(updatedVersion) - initialBuilders := []test.Builder{es, kb, apm, ent, fb} - updatedBuilders := []test.Builder{esUpdated, kbUpdated, apmUpdated, entUpdated, fbUpdated} + initialBuilders := []test.Builder{es, kb, apm, ent, fb, logstash} + updatedBuilders := []test.Builder{esUpdated, kbUpdated, apmUpdated, entUpdated, fbUpdated, logstashUpdated} versionUpgrade := func(k *test.K8sClient) test.StepList { steps := test.StepList{} @@ -101,6 +106,7 @@ func TestVersionUpgradeOrdering(t *testing.T) { ApmServer: ref(k8s.ExtractNamespacedName(&apm.ApmServer)), EnterpriseSearch: ref(k8s.ExtractNamespacedName(&ent.EnterpriseSearch)), Beat: ref(k8s.ExtractNamespacedName(&fb.Beat)), + Logstash: ref(k8s.ExtractNamespacedName(&logstash.Logstash)), } err := stackVersions.Retrieve(k.Client) // check the retrieved versions first (before returning on err) @@ -128,6 +134,7 @@ type StackResourceVersions struct { ApmServer refVersion EnterpriseSearch refVersion Beat refVersion + Logstash refVersion } func (s StackResourceVersions) IsValid() bool { @@ -140,7 +147,7 @@ func (s StackResourceVersions) IsValid() bool { } func (s StackResourceVersions) AllSetTo(version string) bool { - for _, ref := range []refVersion{s.Elasticsearch, s.Kibana, s.ApmServer, s.EnterpriseSearch, s.Beat} { + for _, ref := range []refVersion{s.Elasticsearch, s.Kibana, s.ApmServer, s.EnterpriseSearch, s.Beat, s.Logstash} { if ref.version != version { return false } @@ -149,7 +156,7 @@ func (s StackResourceVersions) AllSetTo(version string) bool { } func (s *StackResourceVersions) Retrieve(client k8s.Client) error { - calls := []func(c k8s.Client) error{s.retrieveBeat, s.retrieveApmServer, s.retrieveKibana, s.retrieveEnterpriseSearch, s.retrieveElasticsearch} + calls := []func(c k8s.Client) error{s.retrieveBeat, s.retrieveApmServer, s.retrieveKibana, s.retrieveEnterpriseSearch, s.retrieveElasticsearch, s.retrieveLogstash} // grab at least one error if multiple occur var callsErr error for _, f := range calls { @@ -223,3 +230,12 @@ func (s *StackResourceVersions) retrieveBeat(c k8s.Client) error { s.Beat.version = beat.Status.Version return nil } + +func (s *StackResourceVersions) retrieveLogstash(c k8s.Client) error { + var logstash logstashv1alpha1.Logstash + if err := c.Get(context.Background(), s.Logstash.ref, &logstash); err != nil { + return err + } + s.Logstash.version = logstash.Status.Version + return nil +} diff --git a/test/e2e/test/checks/monitoring.go b/test/e2e/test/checks/monitoring.go index 604e753cf6..d2ab0f3ff4 100644 --- a/test/e2e/test/checks/monitoring.go +++ b/test/e2e/test/checks/monitoring.go @@ -52,13 +52,13 @@ type stackMonitoringChecks struct { func (c stackMonitoringChecks) Steps() test.StepList { return test.StepList{ - c.CheckBeatSidecars(), + c.CheckBeatSidecarsInElasticsearch(), c.CheckMonitoringMetricsIndex(), c.CheckFilebeatIndex(), } } -func (c stackMonitoringChecks) CheckBeatSidecars() test.Step { +func (c stackMonitoringChecks) CheckBeatSidecarsInElasticsearch() test.Step { return test.Step{ Name: "Check that beat sidecars are running", Test: test.Eventually(func() error { diff --git a/test/e2e/test/helper/yaml.go b/test/e2e/test/helper/yaml.go index 49a017e34a..57f5d2b190 100644 --- a/test/e2e/test/helper/yaml.go +++ b/test/e2e/test/helper/yaml.go @@ -33,6 +33,7 @@ import ( esv1 
"github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1" entv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/enterprisesearch/v1" kbv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/kibana/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" beatcommon "github.com/elastic/cloud-on-k8s/v2/pkg/controller/beat/common" "github.com/elastic/cloud-on-k8s/v2/test/e2e/cmd/run" "github.com/elastic/cloud-on-k8s/v2/test/e2e/test" @@ -42,6 +43,7 @@ import ( "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/elasticsearch" "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/enterprisesearch" "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/kibana" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/logstash" ) type BuilderTransform func(test.Builder) test.Builder @@ -59,7 +61,7 @@ func NewYAMLDecoder() *YAMLDecoder { scheme.AddKnownTypes(beatv1beta1.GroupVersion, &beatv1beta1.Beat{}, &beatv1beta1.BeatList{}) scheme.AddKnownTypes(entv1.GroupVersion, &entv1.EnterpriseSearch{}, &entv1.EnterpriseSearchList{}) scheme.AddKnownTypes(agentv1alpha1.GroupVersion, &agentv1alpha1.Agent{}, &agentv1alpha1.AgentList{}) - + scheme.AddKnownTypes(logstashv1alpha1.GroupVersion, &logstashv1alpha1.Logstash{}, &logstashv1alpha1.LogstashList{}) scheme.AddKnownTypes(rbacv1.SchemeGroupVersion, &rbacv1.ClusterRoleBinding{}, &rbacv1.ClusterRoleBindingList{}) scheme.AddKnownTypes(rbacv1.SchemeGroupVersion, &rbacv1.ClusterRole{}, &rbacv1.ClusterRoleList{}) scheme.AddKnownTypes(corev1.SchemeGroupVersion, &corev1.ServiceAccount{}, &corev1.ServiceAccountList{}) @@ -108,6 +110,10 @@ func (yd *YAMLDecoder) ToBuilders(reader *bufio.Reader, transform BuilderTransfo b := enterprisesearch.NewBuilderWithoutSuffix(decodedObj.Name) b.EnterpriseSearch = *decodedObj builder = transform(b) + case *logstashv1alpha1.Logstash: + b := logstash.NewBuilderWithoutSuffix(decodedObj.Name) + b.Logstash = *decodedObj + builder = transform(b) default: return builders, fmt.Errorf("unexpected object type: %t", decodedObj) } @@ -312,6 +318,24 @@ func transformToE2E(namespace, fullTestName, suffix string, transformers []Build b = b.WithPodTemplateServiceAccount(b.PodTemplate.Spec.ServiceAccountName + "-" + suffix) } + builder = b + case *logstashv1alpha1.Logstash: + b := logstash.NewBuilderWithoutSuffix(decodedObj.Name) + + esRefs := make([]logstashv1alpha1.ElasticsearchCluster, 0, len(b.Logstash.Spec.ElasticsearchRefs)) + for _, ref := range b.Logstash.Spec.ElasticsearchRefs { + esRefs = append(esRefs, logstashv1alpha1.ElasticsearchCluster{ + ObjectSelector: tweakServiceRef(ref.ObjectSelector, suffix), + ClusterName: ref.ClusterName, + }) + } + + b = b.WithNamespace(namespace). + WithSuffix(suffix). + WithElasticsearchRefs(esRefs...). + WithLabel(run.TestNameLabel, fullTestName). 
+ WithPodLabel(run.TestNameLabel, fullTestName) + builder = b case *corev1.ServiceAccount: decodedObj.Namespace = namespace diff --git a/test/e2e/test/k8s_client.go b/test/e2e/test/k8s_client.go index 8989fc2e82..bb56fee30b 100644 --- a/test/e2e/test/k8s_client.go +++ b/test/e2e/test/k8s_client.go @@ -33,6 +33,7 @@ import ( esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1" entv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/enterprisesearch/v1" kbv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/kibana/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/agent" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/apmserver" beatcommon "github.com/elastic/cloud-on-k8s/v2/pkg/controller/beat/common" @@ -42,6 +43,7 @@ import ( "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/volume" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/enterprisesearch" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/kibana" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/logstash" "github.com/elastic/cloud-on-k8s/v2/pkg/controller/maps" "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" ) @@ -92,6 +94,9 @@ func CreateClient() (k8s.Client, error) { if err := agentv1alpha1.AddToScheme(scheme.Scheme); err != nil { return nil, err } + if err := logstashv1alpha1.AddToScheme(scheme.Scheme); err != nil { + return nil, err + } client, err := k8sclient.New(cfg, k8sclient.Options{Scheme: scheme.Scheme}) if err != nil { return nil, err @@ -358,6 +363,15 @@ func (k K8sClient) CreateOrUpdateSecrets(secrets ...corev1.Secret) error { return nil } +func (k *K8sClient) DeleteSecrets(secrets ...corev1.Secret) error { + for i := range secrets { + if err := k.Client.Delete(context.Background(), &secrets[i]); err != nil { + return err + } + } + return nil +} + func (k K8sClient) CreateOrUpdate(objs ...client.Object) error { for _, obj := range objs { // create a copy to ensure that the original object is not modified @@ -431,6 +445,15 @@ func AgentPodListOptions(agentNamespace, agentName string) []k8sclient.ListOptio return []k8sclient.ListOption{ns, matchLabels} } +func LogstashPodListOptions(logstashNamespace, logstashName string) []k8sclient.ListOption { + ns := k8sclient.InNamespace(logstashNamespace) + matchLabels := k8sclient.MatchingLabels(map[string]string{ + commonv1.TypeLabelName: logstash.TypeLabelValue, + logstash.NameLabelName: logstashName, + }) + return []k8sclient.ListOption{ns, matchLabels} +} + func BeatPodListOptions(beatNamespace, beatName, beatType string) []k8sclient.ListOption { ns := k8sclient.InNamespace(beatNamespace) matchLabels := k8sclient.MatchingLabels(map[string]string{ diff --git a/test/e2e/test/logstash/builder.go b/test/e2e/test/logstash/builder.go new file mode 100644 index 0000000000..851fe504b7 --- /dev/null +++ b/test/e2e/test/logstash/builder.go @@ -0,0 +1,236 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+ +package logstash + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/rand" + "sigs.k8s.io/controller-runtime/pkg/client" + + commonv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/version" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/cmd/run" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/test" +) + +type Builder struct { + Logstash logstashv1alpha1.Logstash + MutatedFrom *Builder +} + +func NewBuilder(name string) Builder { + return newBuilder(name, rand.String(4)) +} + +func NewBuilderWithoutSuffix(name string) Builder { + return newBuilder(name, "") +} + +func newBuilder(name, randSuffix string) Builder { + meta := metav1.ObjectMeta{ + Name: name, + Namespace: test.Ctx().ManagedNamespace(0), + } + def := test.Ctx().ImageDefinitionFor(logstashv1alpha1.Kind) + return Builder{ + Logstash: logstashv1alpha1.Logstash{ + ObjectMeta: meta, + Spec: logstashv1alpha1.LogstashSpec{ + Count: 1, + Version: def.Version, + }, + }, + }. + WithImage(def.Image). + WithSuffix(randSuffix). + WithLabel(run.TestNameLabel, name). + WithPodLabel(run.TestNameLabel, name) +} + +func (b Builder) WithImage(image string) Builder { + b.Logstash.Spec.Image = image + return b +} + +func (b Builder) WithSuffix(suffix string) Builder { + if suffix != "" { + b.Logstash.ObjectMeta.Name = b.Logstash.ObjectMeta.Name + "-" + suffix + } + return b +} + +func (b Builder) WithLabel(key, value string) Builder { + if b.Logstash.Labels == nil { + b.Logstash.Labels = make(map[string]string) + } + b.Logstash.Labels[key] = value + + return b +} + +// WithRestrictedSecurityContext helps to enforce a restricted security context on the objects. +func (b Builder) WithRestrictedSecurityContext() Builder { + b.Logstash.Spec.PodTemplate.Spec.SecurityContext = test.DefaultSecurityContext() + return b +} + +func (b Builder) WithNamespace(namespace string) Builder { + b.Logstash.ObjectMeta.Namespace = namespace + return b +} + +func (b Builder) WithVersion(version string) Builder { + b.Logstash.Spec.Version = version + return b +} + +func (b Builder) WithNodeCount(count int) Builder { + b.Logstash.Spec.Count = int32(count) + return b +} + +// WithPodLabel sets the label in the pod template. All invocations can be removed when +// https://github.com/elastic/cloud-on-k8s/issues/2652 is implemented. +func (b Builder) WithPodLabel(key, value string) Builder { + labels := b.Logstash.Spec.PodTemplate.Labels + if labels == nil { + labels = make(map[string]string) + } + labels[key] = value + b.Logstash.Spec.PodTemplate.Labels = labels + return b +} + +func (b Builder) WithMutatedFrom(mutatedFrom *Builder) Builder { + b.MutatedFrom = mutatedFrom + return b +} + +func (b Builder) WithServices(services ...logstashv1alpha1.LogstashService) Builder { + b.Logstash.Spec.Services = append(b.Logstash.Spec.Services, services...) 
+ return b +} + +func (b Builder) WithPipelines(pipelines []commonv1.Config) Builder { + b.Logstash.Spec.Pipelines = pipelines + return b +} + +func (b Builder) WithPipelinesConfigRef(ref commonv1.ConfigSource) Builder { + b.Logstash.Spec.PipelinesRef = &ref + return b +} + +func (b Builder) WithVolumes(vols ...corev1.Volume) Builder { + b.Logstash.Spec.PodTemplate.Spec.Volumes = append(b.Logstash.Spec.PodTemplate.Spec.Volumes, vols...) + return b +} + +func (b Builder) WithVolumeMounts(mounts ...corev1.VolumeMount) Builder { + if b.Logstash.Spec.PodTemplate.Spec.Containers == nil { + b.Logstash.Spec.PodTemplate.Spec.Containers = []corev1.Container{ + { + Name: "logstash", + VolumeMounts: mounts, + }, + } + return b + } + + if b.Logstash.Spec.PodTemplate.Spec.Containers[0].VolumeMounts == nil { + b.Logstash.Spec.PodTemplate.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{} + } + b.Logstash.Spec.PodTemplate.Spec.Containers[0].VolumeMounts = append(b.Logstash.Spec.PodTemplate.Spec.Containers[0].VolumeMounts, mounts...) + + return b +} + +func (b Builder) WithElasticsearchRefs(refs ...logstashv1alpha1.ElasticsearchCluster) Builder { + b.Logstash.Spec.ElasticsearchRefs = refs + return b +} + +func (b Builder) WithMetricsMonitoring(metricsESRef ...commonv1.ObjectSelector) Builder { + b.Logstash.Spec.Monitoring.Metrics.ElasticsearchRefs = metricsESRef + return b +} + +func (b Builder) WithLogsMonitoring(logsESRef ...commonv1.ObjectSelector) Builder { + b.Logstash.Spec.Monitoring.Logs.ElasticsearchRefs = logsESRef + return b +} + +func (b Builder) GetMetricsIndexPattern() string { + return ".monitoring-logstash-8-mb" +} + +func (b Builder) WithConfig(config map[string]interface{}) Builder { + b.Logstash.Spec.Config = &commonv1.Config{ + Data: config, + } + return b +} + +func (b Builder) Name() string { + return b.Logstash.Name +} + +func (b Builder) Namespace() string { + return b.Logstash.Namespace +} + +func (b Builder) GetLogsCluster() *types.NamespacedName { + if len(b.Logstash.Spec.Monitoring.Logs.ElasticsearchRefs) == 0 { + return nil + } + logsCluster := b.Logstash.Spec.Monitoring.Logs.ElasticsearchRefs[0].NamespacedName() + return &logsCluster +} + +func (b Builder) GetMetricsCluster() *types.NamespacedName { + if len(b.Logstash.Spec.Monitoring.Metrics.ElasticsearchRefs) == 0 { + return nil + } + metricsCluster := b.Logstash.Spec.Monitoring.Metrics.ElasticsearchRefs[0].NamespacedName() + return &metricsCluster +} + +func (b Builder) NSN() types.NamespacedName { + return k8s.ExtractNamespacedName(&b.Logstash) +} + +func (b Builder) Kind() string { + return logstashv1alpha1.Kind +} + +func (b Builder) Spec() interface{} { + return b.Logstash.Spec +} + +func (b Builder) Count() int32 { + return b.Logstash.Spec.Count +} + +func (b Builder) ServiceName() string { + return b.Logstash.Name + "-ls-api" +} + +func (b Builder) ListOptions() []client.ListOption { + return test.LogstashPodListOptions(b.Logstash.Namespace, b.Logstash.Name) +} + +func (b Builder) SkipTest() bool { + supportedVersions := version.SupportedLogstashVersions + + ver := version.MustParse(b.Logstash.Spec.Version) + return supportedVersions.WithinRange(ver) != nil +} + +var _ test.Builder = Builder{} +var _ test.Subject = Builder{} diff --git a/test/e2e/test/logstash/checks.go b/test/e2e/test/logstash/checks.go new file mode 100644 index 0000000000..fe07d35289 --- /dev/null +++ b/test/e2e/test/logstash/checks.go @@ -0,0 +1,290 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package logstash + +import ( + "context" + "encoding/json" + "fmt" + + corev1 "k8s.io/api/core/v1" + + v1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/settings" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/test" +) + +type Request struct { + Name string + Path string +} + +type Want struct { + // Key is field path of ucfg.Config. Value is the expected string + // example, pipelines.demo.batch_size : 2 + Match map[string]string + MatchFunc map[string]func(string) bool +} + +// CheckSecrets checks that expected secrets have been created. +func CheckSecrets(b Builder, k *test.K8sClient) test.Step { + return test.CheckSecretsContent(k, b.Logstash.Namespace, func() []test.ExpectedSecret { + logstashName := b.Logstash.Name + // hardcode all secret names and keys to catch any breaking change + expected := []test.ExpectedSecret{ + { + Name: logstashName + "-ls-config", + Keys: []string{"logstash.yml"}, + Labels: map[string]string{ + "eck.k8s.elastic.co/credentials": "true", + "logstash.k8s.elastic.co/name": logstashName, + }, + }, + { + Name: logstashName + "-ls-pipeline", + Keys: []string{"pipelines.yml"}, + Labels: map[string]string{ + "eck.k8s.elastic.co/credentials": "true", + "logstash.k8s.elastic.co/name": logstashName, + }, + }, + } + + // check ES association user/ secret + nn := k8s.ExtractNamespacedName(&b.Logstash) + lsName := nn.Name + lsNamespace := nn.Namespace + + for _, ref := range b.Logstash.Spec.ElasticsearchRefs { + esNamespace := ref.WithDefaultNamespace(lsNamespace).Namespace + expected = append(expected, + test.ExpectedSecret{ + Name: fmt.Sprintf("%s-logstash-es-%s-%s-ca", lsName, esNamespace, ref.Name), + Keys: []string{"ca.crt", "tls.crt"}, + Labels: map[string]string{ + "elasticsearch.k8s.elastic.co/cluster-name": ref.Name, + "elasticsearch.k8s.elastic.co/cluster-namespace": esNamespace, + "logstashassociation.k8s.elastic.co/name": lsName, + "logstashassociation.k8s.elastic.co/namespace": lsNamespace, + }, + }, + ) + expected = append(expected, + test.ExpectedSecret{ + Name: fmt.Sprintf("%s-%s-%s-%s-logstash-user", lsNamespace, lsName, esNamespace, ref.Name), + Keys: []string{"name", "passwordHash", "userRoles"}, + Labels: map[string]string{ + "elasticsearch.k8s.elastic.co/cluster-name": ref.Name, + "elasticsearch.k8s.elastic.co/cluster-namespace": esNamespace, + "logstashassociation.k8s.elastic.co/name": lsName, + "logstashassociation.k8s.elastic.co/namespace": lsNamespace, + }, + }, + ) + } + return expected + }) +} + +func CheckStatus(b Builder, k *test.K8sClient) test.Step { + return test.Step{ + Name: "Logstash status should have the correct status", + Test: test.Eventually(func() error { + var logstash logstashv1alpha1.Logstash + if err := k.Client.Get(context.Background(), k8s.ExtractNamespacedName(&b.Logstash), &logstash); err != nil { + return err + } + + logstash.Status.ObservedGeneration = 0 + + // pod status + expected := logstashv1alpha1.LogstashStatus{ + ExpectedNodes: b.Logstash.Spec.Count, + AvailableNodes: b.Logstash.Spec.Count, + Version: b.Logstash.Spec.Version, + } + + if (logstash.Status.ExpectedNodes != expected.ExpectedNodes) || + (logstash.Status.AvailableNodes != 
expected.AvailableNodes) || + (logstash.Status.Version != expected.Version) { + return fmt.Errorf("expected status %+v but got %+v", expected, logstash.Status) + } + + expectedMonitoringInStatus := uniqueAssociationCount(logstash.Spec.Monitoring.Metrics.ElasticsearchRefs, logstash.Spec.Monitoring.Logs.ElasticsearchRefs) + // monitoring status + actualMonitoringInStatus := len(logstash.Status.MonitoringAssociationStatus) + if expectedMonitoringInStatus != actualMonitoringInStatus { + return fmt.Errorf("expected %d monitoring associations in status but got %d", expectedMonitoringInStatus, actualMonitoringInStatus) + } + for a, s := range logstash.Status.MonitoringAssociationStatus { + if s != v1.AssociationEstablished { + return fmt.Errorf("monitoring association %s has status %s ", a, s) + } + } + + // elasticsearch status + expectedEsRefsInStatus := len(logstash.Spec.ElasticsearchRefs) + actualEsRefsInStatus := len(logstash.Status.ElasticsearchAssociationsStatus) + if expectedEsRefsInStatus != actualEsRefsInStatus { + return fmt.Errorf("expected %d elasticsearch associations in status but got %d", expectedEsRefsInStatus, actualEsRefsInStatus) + } + for a, s := range logstash.Status.ElasticsearchAssociationsStatus { + if s != v1.AssociationEstablished { + return fmt.Errorf("elasticsearch association %s has status %s ", a, s) + } + } + + return nil + }), + } +} + +func uniqueAssociationCount(refsList ...[]v1.ObjectSelector) int { + uniqueAssociations := make(map[v1.ObjectSelector]struct{}) + for _, refs := range refsList { + for _, val := range refs { + uniqueAssociations[val] = struct{}{} + } + } + return len(uniqueAssociations) +} + +func (b Builder) CheckStackTestSteps(k *test.K8sClient) test.StepList { + return test.StepList{ + b.CheckMetricsRequest(k, + Request{ + Name: "metrics", + Path: "/", + }, + Want{ + Match: map[string]string{"status": "green"}, + }), + b.CheckMetricsRequest(k, + Request{ + Name: "default pipeline", + Path: "/_node/pipelines/main", + }, + Want{ + Match: map[string]string{ + "pipelines.main.batch_size": "125", + "status": "green", + }, + }), + } +} + +func (b Builder) CheckMetricsRequest(k *test.K8sClient, req Request, want Want) test.Step { + return test.Step{ + Name: fmt.Sprintf("Logstash should respond to %s requests", req.Name), + Test: test.Eventually(func() error { + // send request and parse to map obj + client, err := NewLogstashClient(b.Logstash, k) + if err != nil { + return err + } + + bytes, err := DoRequest(client, b.Logstash, "GET", req.Path) + if err != nil { + return err + } + + var response map[string]interface{} + err = json.Unmarshal(bytes, &response) + if err != nil { + return err + } + + // parse response to ucfg.Config for traverse + res, err := settings.NewCanonicalConfigFrom(response) + if err != nil { + return err + } + + // check expected string + for k, v := range want.Match { + str, err := res.String(k) + if err != nil { + return err + } + if str != v { + return fmt.Errorf("expected %s to be %s but got %s", k, v, str) + } + } + + // check expected expression + for k, f := range want.MatchFunc { + str, err := res.String(k) + if err != nil { + return err + } + if !f(str) { + return fmt.Errorf("expression failed: %s got %s", k, str) + } + } + + return nil + }), + } +} + +func CheckServices(b Builder, k *test.K8sClient) test.Step { + return test.Step{ + Name: "Logstash services should be created", + Test: test.Eventually(func() error { + serviceNames := map[string]struct{}{} + serviceNames[logstashv1alpha1.APIServiceName(b.Logstash.Name)] = 
struct{}{} + for _, r := range b.Logstash.Spec.Services { + serviceNames[logstashv1alpha1.UserServiceName(b.Logstash.Name, r.Name)] = struct{}{} + } + for serviceName := range serviceNames { + svc, err := k.GetService(b.Logstash.Namespace, serviceName) + if err != nil { + return err + } + if svc.Spec.Type == corev1.ServiceTypeLoadBalancer { + if len(svc.Status.LoadBalancer.Ingress) == 0 { + return fmt.Errorf("load balancer for %s not ready yet", svc.Name) + } + } + } + return nil + }), + } +} + +// CheckServicesEndpoints checks that services have the expected number of endpoints +func CheckServicesEndpoints(b Builder, k *test.K8sClient) test.Step { + return test.Step{ + Name: "Logstash services should have endpoints", + Test: test.Eventually(func() error { + servicePorts := make(map[string]int32) + servicePorts[logstashv1alpha1.APIServiceName(b.Logstash.Name)] = b.Logstash.Spec.Count + for _, r := range b.Logstash.Spec.Services { + portsPerService := int32(len(r.Service.Spec.Ports)) + servicePorts[logstashv1alpha1.UserServiceName(b.Logstash.Name, r.Name)] = b.Logstash.Spec.Count * portsPerService + } + + for endpointName, addrPortCount := range servicePorts { + if addrPortCount == 0 { + continue + } + endpoints, err := k.GetEndpoints(b.Logstash.Namespace, endpointName) + if err != nil { + return err + } + if len(endpoints.Subsets) == 0 { + return fmt.Errorf("no subset for endpoint %s", endpointName) + } + if int32(len(endpoints.Subsets[0].Addresses)*len(endpoints.Subsets[0].Ports)) != addrPortCount { + return fmt.Errorf("%d addresses and %d ports found for endpoint %s, expected %d", len(endpoints.Subsets[0].Addresses), + len(endpoints.Subsets[0].Ports), endpointName, addrPortCount) + } + } + return nil + }), + } +} diff --git a/test/e2e/test/logstash/http_client.go b/test/e2e/test/logstash/http_client.go new file mode 100644 index 0000000000..c5b271c242 --- /dev/null +++ b/test/e2e/test/logstash/http_client.go @@ -0,0 +1,64 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. 
+
+package logstash
+
+import (
+	"context"
+	"crypto/x509"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+
+	"github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1"
+	ls "github.com/elastic/cloud-on-k8s/v2/pkg/controller/logstash"
+	"github.com/elastic/cloud-on-k8s/v2/pkg/controller/logstash/network"
+	"github.com/elastic/cloud-on-k8s/v2/test/e2e/test"
+)
+
+// TODO: refactor; this client is essentially identical to the Kibana test client
+func NewLogstashClient(logstash v1alpha1.Logstash, k *test.K8sClient) (*http.Client, error) {
+	var caCerts []*x509.Certificate
+	// TODO: Integrate with TLS on metrics API
+	// if ems.Spec.HTTP.TLS.Enabled() {
+	//	crts, err := k.GetHTTPCerts(maps.EMSNamer, ems.Namespace, ems.Name)
+	//	if err != nil {
+	//		return nil, err
+	//	}
+	//	caCerts = crts
+	//}
+	return test.NewHTTPClient(caCerts), nil
+}
+
+func DoRequest(client *http.Client, logstash v1alpha1.Logstash, method, path string) ([]byte, error) {
+	var scheme = "http"
+	var port = network.HTTPPort
+	for _, service := range logstash.Spec.Services {
+		if service.Name == ls.LogstashAPIServiceName && len(service.Service.Spec.Ports) > 0 {
+			port = int(service.Service.Spec.Ports[0].Port)
+		}
+	}
+
+	url, err := url.Parse(fmt.Sprintf("%s://%s.%s.svc:%d%s", scheme, v1alpha1.APIServiceName(logstash.Name), logstash.Namespace, port, path))
+
+	if err != nil {
+		return nil, fmt.Errorf("while parsing URL: %w", err)
+	}
+
+	request, err := http.NewRequestWithContext(context.Background(), method, url.String(), nil)
+	if err != nil {
+		return nil, fmt.Errorf("while constructing request: %w", err)
+	}
+
+	resp, err := client.Do(request)
+	if err != nil {
+		return nil, fmt.Errorf("while making request: %w", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode < 200 || resp.StatusCode > 299 {
+		return nil, fmt.Errorf("failed to request %s, status is %d", path, resp.StatusCode)
+	}
+	return io.ReadAll(resp.Body)
+}
diff --git a/test/e2e/test/logstash/steps.go b/test/e2e/test/logstash/steps.go
new file mode 100644
index 0000000000..6a0b3a1634
--- /dev/null
+++ b/test/e2e/test/logstash/steps.go
@@ -0,0 +1,149 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License 2.0;
+// you may not use this file except in compliance with the Elastic License 2.0.
+ +package logstash + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + + logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/cmd/run" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/test" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/checks" + "github.com/elastic/cloud-on-k8s/v2/test/e2e/test/generation" +) + +func (b Builder) InitTestSteps(k *test.K8sClient) test.StepList { + return []test.Step{ + { + Name: "K8S should be accessible", + Test: test.Eventually(func() error { + pods := corev1.PodList{} + return k.Client.List(context.Background(), &pods) + }), + }, + { + Name: "Label test pods", + Test: test.Eventually(func() error { + return test.LabelTestPods( + k.Client, + test.Ctx(), + run.TestNameLabel, + b.Logstash.Labels[run.TestNameLabel]) + }), + Skip: func() bool { + return test.Ctx().Local + }, + }, + { + Name: "Logstash CRDs should exist", + Test: test.Eventually(func() error { + crd := &logstashv1alpha1.LogstashList{} + return k.Client.List(context.Background(), crd) + }), + }, + { + Name: "Remove Logstash if it already exists", + Test: test.Eventually(func() error { + err := k.Client.Delete(context.Background(), &b.Logstash) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + // wait for pods to disappear + return k.CheckPodCount(0, test.LogstashPodListOptions(b.Logstash.Namespace, b.Logstash.Name)...) + }), + }, + } +} + +func (b Builder) CreationTestSteps(k *test.K8sClient) test.StepList { + return test.StepList{ + { + Name: "Submitting the Logstash resource should succeed", + Test: test.Eventually(func() error { + return k.CreateOrUpdate(&b.Logstash) + }), + }, + { + Name: "Logstash should be created", + Test: test.Eventually(func() error { + var logstash logstashv1alpha1.Logstash + return k.Client.Get(context.Background(), k8s.ExtractNamespacedName(&b.Logstash), &logstash) + }), + }, + } +} + +func (b Builder) CheckK8sTestSteps(k *test.K8sClient) test.StepList { + return test.StepList{ + CheckSecrets(b, k), + CheckStatus(b, k), + CheckServices(b, k), + CheckServicesEndpoints(b, k), + checks.CheckPods(b, k), + } +} + +func (b Builder) UpgradeTestSteps(k *test.K8sClient) test.StepList { + return test.StepList{ + { + Name: "Updating the Logstash spec succeed", + Test: test.Eventually(func() error { + var logstash logstashv1alpha1.Logstash + if err := k.Client.Get(context.Background(), k8s.ExtractNamespacedName(&b.Logstash), &logstash); err != nil { + return err + } + logstash.Spec = b.Logstash.Spec + return k.Client.Update(context.Background(), &logstash) + }), + }} +} + +func (b Builder) MutationTestSteps(k *test.K8sClient) test.StepList { + var logstashGenerationBeforeMutation, logstashObservedGenerationBeforeMutation int64 + isMutated := b.MutatedFrom != nil + return test.StepList{ + generation.RetrieveGenerationsStep(&b.Logstash, k, &logstashGenerationBeforeMutation, &logstashObservedGenerationBeforeMutation), + }.WithSteps(b.UpgradeTestSteps(k)). + WithSteps(b.CheckK8sTestSteps(k)). + WithSteps(b.CheckStackTestSteps(k)). 
+ WithStep(generation.CompareObjectGenerationsStep(&b.Logstash, k, isMutated, logstashGenerationBeforeMutation, logstashObservedGenerationBeforeMutation)) +} + +func (b Builder) DeletionTestSteps(k *test.K8sClient) test.StepList { + return test.StepList{ + { + Name: "Deleting Logstash should return no error", + Test: test.Eventually(func() error { + err := k.Client.Delete(context.Background(), &b.Logstash) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + return nil + }), + }, + { + Name: "Logstash should not be there anymore", + Test: test.Eventually(func() error { + objCopy := k8s.DeepCopyObject(&b.Logstash) + err := k.Client.Get(context.Background(), k8s.ExtractNamespacedName(&b.Logstash), objCopy) + if err != nil && apierrors.IsNotFound(err) { + return nil + } + return fmt.Errorf("expected 404 not found API error here. got: %w", err) + }), + }, + { + Name: "Logstash pods should eventually be removed", + Test: test.Eventually(func() error { + return k.CheckPodCount(0, b.ListOptions()...) + }), + }, + } +} From 294f30230f310f7a049f16869fcf5c535dcf01e2 Mon Sep 17 00:00:00 2001 From: Thibault Richard Date: Fri, 28 Apr 2023 17:21:11 +0200 Subject: [PATCH 05/26] Fix default `elasticsearch-data` volumeMount configuration (#6725) This commit ensures that we take into account the volumes provided by the user to append the default elasticsearch-data volumeMount. --- .../elasticsearch/nodespec/volumes.go | 3 +- .../elasticsearch/nodespec/volumes_test.go | 106 ++++++++++++++++++ 2 files changed, 108 insertions(+), 1 deletion(-) create mode 100644 pkg/controller/elasticsearch/nodespec/volumes_test.go diff --git a/pkg/controller/elasticsearch/nodespec/volumes.go b/pkg/controller/elasticsearch/nodespec/volumes.go index e7a0fb0726..2e230da7ba 100644 --- a/pkg/controller/elasticsearch/nodespec/volumes.go +++ b/pkg/controller/elasticsearch/nodespec/volumes.go @@ -120,7 +120,8 @@ func buildVolumes( volumeMounts = append(volumeMounts, fileSettingsVolume.VolumeMount()) } - volumeMounts = esvolume.AppendDefaultDataVolumeMount(volumeMounts, volumes) + // include the user-provided PodTemplate volumes as the user may have defined the data volume there (e.g.: emptyDir or hostpath volume) + volumeMounts = esvolume.AppendDefaultDataVolumeMount(volumeMounts, append(volumes, nodeSpec.PodTemplate.Spec.Volumes...)) return volumes, volumeMounts } diff --git a/pkg/controller/elasticsearch/nodespec/volumes_test.go b/pkg/controller/elasticsearch/nodespec/volumes_test.go new file mode 100644 index 0000000000..8884834d77 --- /dev/null +++ b/pkg/controller/elasticsearch/nodespec/volumes_test.go @@ -0,0 +1,106 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License 2.0; +// you may not use this file except in compliance with the Elastic License 2.0. + +package nodespec + +import ( + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/version" + "github.com/elastic/cloud-on-k8s/v2/pkg/controller/common/volume" + esvolume "github.com/elastic/cloud-on-k8s/v2/pkg/controller/elasticsearch/volume" +) + +// Test_BuildVolumes_DataVolumeMountPath tests that the elasticsearch-data volumeMount is always set. 
+func Test_BuildVolumes_DataVolumeMountPath(t *testing.T) { + hostPathType := corev1.HostPathDirectoryOrCreate + + tt := []struct { + name string + nodeSpec esv1.NodeSet + want []corev1.ContainerPort + }{ + { + name: "with eck default data PVC", + nodeSpec: esv1.NodeSet{ + VolumeClaimTemplates: esvolume.DefaultVolumeClaimTemplates, + }, + }, + { + name: "with user provided data PVC", + nodeSpec: esv1.NodeSet{ + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "elasticsearch-data", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("42Ti"), + }, + }, + }, + }}, + }, + }, + { + name: "with user provided data empty volume", + nodeSpec: esv1.NodeSet{ + PodTemplate: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{{ + Name: "elasticsearch-data", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }}, + }, + }, + }, + }, + }, + { + name: "with user provided data hostpath volume", + nodeSpec: esv1.NodeSet{ + PodTemplate: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{{ + Name: "elasticsearch-data", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/mnt/data", + Type: &hostPathType, + }, + }, + }}, + }, + }, + }, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + _, volumeMounts := buildVolumes("esname", version.MustParse("8.8.0"), tc.nodeSpec, nil, volume.DownwardAPI{}) + assert.True(t, contains(volumeMounts, "elasticsearch-data", "/usr/share/elasticsearch/data")) + }) + } +} + +func contains(volumeMounts []corev1.VolumeMount, volumeMountName, volumeMountPath string) bool { + for _, vm := range volumeMounts { + if vm.Name == volumeMountName && vm.MountPath == volumeMountPath { + return true + } + } + return false +} From d9b4774423918af6a6da687906b1074fc7de0890 Mon Sep 17 00:00:00 2001 From: Thibault Richard Date: Fri, 28 Apr 2023 17:22:38 +0200 Subject: [PATCH 06/26] Add operator flag to define global container repository (#6737) This adds a new flag `--container-repository` to the operator to be able to specify a global container repository. With this it is now possible to use DockerHub images for example: `--container-registry docker.io --container-repository=elastic`. 
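To make the flag's effect concrete, here is a minimal, self-contained Go sketch of how a registry, an optional repository override and a version combine into a full image reference. It is an illustration only, not the operator's actual implementation (the real logic is in `pkg/controller/common/container/container.go`, modified below); the helper name `composeImage` is made up for this example.

[source,go]
----
package main

import (
	"fmt"
	"strings"
)

// composeImage combines a registry, an optional repository override and a
// version into a full image reference. Illustrative sketch only.
func composeImage(registry, repository, defaultPath, version string) string {
	path := defaultPath // e.g. "elasticsearch/elasticsearch"
	if repository != "" {
		// keep only the image name and re-home it under the repository override
		parts := strings.Split(defaultPath, "/")
		path = repository + "/" + parts[len(parts)-1]
	}
	return fmt.Sprintf("%s/%s:%s", registry, path, version)
}

func main() {
	// --container-registry docker.io --container-repository=elastic
	fmt.Println(composeImage("docker.io", "elastic", "elasticsearch/elasticsearch", "8.7.0"))
	// -> docker.io/elastic/elasticsearch:8.7.0

	// defaults: no repository override
	fmt.Println(composeImage("docker.elastic.co", "", "elasticsearch/elasticsearch", "8.7.0"))
	// -> docker.elastic.co/elasticsearch/elasticsearch:8.7.0
}
----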
--------- Co-authored-by: Peter Brachwitz --- cmd/manager/main.go | 12 ++++++++ deploy/eck-operator/templates/configmap.yaml | 3 ++ deploy/eck-operator/values.yaml | 3 ++ docs/operating-eck/air-gapped.asciidoc | 15 ++++++++-- docs/operating-eck/operator-config.asciidoc | 1 + pkg/controller/common/container/container.go | 29 ++++++++++++++++--- .../common/container/container_test.go | 28 ++++++++++++++---- pkg/controller/common/operator/flags.go | 1 + 8 files changed, 81 insertions(+), 11 deletions(-) diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 2128533822..7eab85925b 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -183,6 +183,11 @@ func Command() *cobra.Command { container.DefaultContainerRegistry, "Container registry to use when downloading Elastic Stack container images", ) + cmd.Flags().String( + operator.ContainerRepositoryFlag, + "", + "Container repository to use when downloading Elastic Stack container images", + ) cmd.Flags().String( operator.ContainerSuffixFlag, "", @@ -474,6 +479,13 @@ func startOperator(ctx context.Context) error { log.Info("Setting default container registry", "container_registry", containerRegistry) container.SetContainerRegistry(containerRegistry) + // set the default container repository + containerRepository := viper.GetString(operator.ContainerRepositoryFlag) + if containerRepository != "" { + log.Info("Setting default container repository", "container_repository", containerRepository) + container.SetContainerRepository(containerRepository) + } + // allow users to specify a container suffix unless --ubi-only mode is active suffix := viper.GetString(operator.ContainerSuffixFlag) if len(suffix) > 0 { diff --git a/deploy/eck-operator/templates/configmap.yaml b/deploy/eck-operator/templates/configmap.yaml index edc4f8fcec..fa604e5212 100644 --- a/deploy/eck-operator/templates/configmap.yaml +++ b/deploy/eck-operator/templates/configmap.yaml @@ -14,6 +14,9 @@ data: {{- with .Values.config.containerSuffix }} container-suffix: {{ . }} {{- end }} + {{- with .Values.config.containerRepository }} + container-repository: {{ . }} + {{- end }} max-concurrent-reconciles: {{ int .Values.config.maxConcurrentReconciles }} {{- with .Values.config.passwordHashCacheSize }} password-hash-cache-size: {{ int . }} diff --git a/deploy/eck-operator/values.yaml b/deploy/eck-operator/values.yaml index 2efceea5da..8426abfc76 100644 --- a/deploy/eck-operator/values.yaml +++ b/deploy/eck-operator/values.yaml @@ -160,6 +160,9 @@ config: # containerRegistry to use for pulling Elasticsearch and other application container images. containerRegistry: docker.elastic.co + # containerRepository to use for pulling Elasticsearch and other application container images. + # containerRepository: "" + # containerSuffix suffix to be appended to container images by default. Cannot be combined with -ubiOnly flag # containerSuffix: "" diff --git a/docs/operating-eck/air-gapped.asciidoc b/docs/operating-eck/air-gapped.asciidoc index 0d4dbb430b..726df73637 100644 --- a/docs/operating-eck/air-gapped.asciidoc +++ b/docs/operating-eck/air-gapped.asciidoc @@ -39,13 +39,24 @@ To make use of your mirrored images you can either set the image for each applic When creating custom resources ({eck_resources_list}), the operator defaults to using container images pulled from the `docker.elastic.co` registry. 
If you are in an environment where external network access is restricted, you can configure the operator to use a different default container registry by starting the operator with the `--container-registry` command-line flag. Check <<{p}-operator-config>> for more information on how to configure the operator using command-line flags and environment variables. -The operator expects container images to be located at specific paths in the default container registry. Make sure that your container images are stored at the right path and are tagged correctly with the stack version number. For example, if your private registry is `my.registry` and you wish to deploy components from stack version {version}, the following image paths should exist: - +The operator expects container images to be located at specific repositories in the default container registry. Make sure that your container images are stored in the right repositories and are tagged correctly with the Stack version number. For example, if your private registry is `my.registry` and you wish to deploy components from Stack version {version}, the following image names should exist: * +my.registry/elasticsearch/elasticsearch:{version}+ * +my.registry/kibana/kibana:{version}+ * +my.registry/apm/apm-server:{version}+ +[float] +[id="{p}-container-repository-override"] +== Use a global container repository + +If you cannot follow the default Elastic image repositories naming scheme, you can configure the operator to use a different container repository by starting the operator with the `--container-repository` command-line flag. +Check <<{p}-operator-config>> for more information on how to configure the operator using command-line flags and environment variables. + +For example, if your private registry is `my.registry` and all Elastic images are located under the `elastic` repository, the following image names should exist: + +* +my.registry/elastic/elasticsearch:{version}+ +* +my.registry/elastic/kibana:{version}+ +* +my.registry/elastic/apm-server:{version}+ [float] [id="{p}-eck-diag-air-gapped"] diff --git a/docs/operating-eck/operator-config.asciidoc b/docs/operating-eck/operator-config.asciidoc index 9086f36068..7b9ea037d0 100644 --- a/docs/operating-eck/operator-config.asciidoc +++ b/docs/operating-eck/operator-config.asciidoc @@ -20,6 +20,7 @@ ECK can be configured using either command line flags or environment variables. |cert-validity |8760h |Duration representing the validity period of a generated TLS certificate. |config |"" | Path to a file containing the operator configuration. |container-registry |docker.elastic.co | Container registry to use for pulling Elastic Stack container images. +|container-repository |"" | Container repository to use for pulling Elastic Stack container images. |container-suffix |"" | Suffix to be appended to container images by default. Cannot be combined with `--ubi-only` flag. |disable-config-watch| false| Watch the configuration file for changes and restart to apply them. Only effective when the `--config` flag is used to set the configuration file. |disable-telemetry| false| Disable periodically updating ECK telemetry data for Kibana to consume. 
diff --git a/pkg/controller/common/container/container.go b/pkg/controller/common/container/container.go
index 5056dcc4cc..8f837c11fd 100644
--- a/pkg/controller/common/container/container.go
+++ b/pkg/controller/common/container/container.go
@@ -12,8 +12,9 @@ import (
 const DefaultContainerRegistry = "docker.elastic.co"
 
 var (
-	containerRegistry = DefaultContainerRegistry
-	containerSuffix   = ""
+	containerRegistry   = DefaultContainerRegistry
+	containerRepository = ""
+	containerSuffix     = ""
 )
 
 // SetContainerRegistry sets the global container registry used to download Elastic stack images.
@@ -21,12 +22,25 @@ func SetContainerRegistry(registry string) {
 	containerRegistry = registry
 }
 
+// SetContainerRepository sets the global container repository used to download Elastic stack images.
+func SetContainerRepository(repository string) {
+	containerRepository = repository
+}
+
 func SetContainerSuffix(suffix string) {
 	containerSuffix = suffix
 }
 
 type Image string
 
+func (i Image) Name() string {
+	parts := strings.Split(string(i), "/")
+	if len(parts) == 2 {
+		return parts[1]
+	}
+	return string(i)
+}
+
 const (
 	APMServerImage     Image = "apm/apm-server"
 	ElasticsearchImage Image = "elasticsearch/elasticsearch"
@@ -45,9 +59,16 @@ const (
 // ImageRepository returns the full container image name by concatenating the current container registry and the image path with the given version.
 func ImageRepository(img Image, version string) string {
+	// if a global container repository is defined, re-home the image name under it
+	image := img
+
+	if containerRepository != "" {
+		image = Image(fmt.Sprintf("%s/%s", containerRepository, img.Name()))
+	}
+
 	// don't double append the suffix if it is already contained, as is e.g. the case for maps
 	if strings.HasSuffix(string(img), containerSuffix) {
-		return fmt.Sprintf("%s/%s:%s", containerRegistry, img, version)
+		return fmt.Sprintf("%s/%s:%s", containerRegistry, image, version)
 	}
-	return fmt.Sprintf("%s/%s%s:%s", containerRegistry, img, containerSuffix, version)
+	return fmt.Sprintf("%s/%s%s:%s", containerRegistry, image, containerSuffix, version)
 }
diff --git a/pkg/controller/common/container/container_test.go b/pkg/controller/common/container/container_test.go
index b0c4e8ff14..f5ac542c04 100644
--- a/pkg/controller/common/container/container_test.go
+++ b/pkg/controller/common/container/container_test.go
@@ -13,11 +13,12 @@ import (
 func TestImageRepository(t *testing.T) {
 	testRegistry := "my.docker.registry.com:8080"
 	testCases := []struct {
-		name    string
-		image   Image
-		suffix  string
-		version string
-		want    string
+		name       string
+		image      Image
+		repository string
+		suffix     string
+		version    string
+		want       string
 	}{
 		{
 			name:    "APM server image",
@@ -50,6 +51,21 @@ func TestImageRepository(t *testing.T) {
 			suffix:  "-ubi8",
 			want:    testRegistry + "/elastic-maps-service/elastic-maps-server-ubi8:7.12.0",
 		},
+		{
+			name:       "Elasticsearch image with custom repository",
+			image:      ElasticsearchImage,
+			version:    "42.0.0",
+			repository: "elastic",
+			want:       testRegistry + "/elastic/elasticsearch:42.0.0",
+		},
+		{
+			name:       "Elasticsearch image with custom repository and suffix",
+			image:      ElasticsearchImage,
+			version:    "42.0.0",
+			repository: "elastic",
+			suffix:     "-obi1",
+			want:       testRegistry + "/elastic/elasticsearch-obi1:42.0.0",
+		},
 	}
 
 	for _, tc := range testCases {
@@ -63,7 +79,9 @@ func TestImageRepository(t *testing.T) {
 		}()
 		SetContainerRegistry(testRegistry)
+		SetContainerRepository(tc.repository)
 		SetContainerSuffix(tc.suffix)
+
 		have := ImageRepository(tc.image, tc.version)
 		assert.Equal(t, tc.want, have)
 	})
 }
diff --git
a/pkg/controller/common/operator/flags.go b/pkg/controller/common/operator/flags.go index 8568868da7..cbaced28c0 100644 --- a/pkg/controller/common/operator/flags.go +++ b/pkg/controller/common/operator/flags.go @@ -13,6 +13,7 @@ const ( CertValidityFlag = "cert-validity" ConfigFlag = "config" ContainerRegistryFlag = "container-registry" + ContainerRepositoryFlag = "container-repository" ContainerSuffixFlag = "container-suffix" DebugHTTPListenFlag = "debug-http-listen" DisableConfigWatch = "disable-config-watch" From e679b5f4beb19bc4e983b5a6834d75c20dc4fdb0 Mon Sep 17 00:00:00 2001 From: Thibault Richard Date: Fri, 28 Apr 2023 17:54:09 +0200 Subject: [PATCH 07/26] Update Logstash stack mon tests (#6741) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adjusts the number of volumes expected from Beats sidecars in the Logstash Stack Monitoring unit tests. Why? Because we don't test PRs with an automatic merge of the main branch (🐛🐞), we missed that the tests in #6732 had to be updated to take into account the changes made by #6703, which adds a new temp volume to the Beats sidecars. --- .../logstash/stackmon/sidecar_test.go | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/pkg/controller/logstash/stackmon/sidecar_test.go b/pkg/controller/logstash/stackmon/sidecar_test.go index 627486e106..4b1243a93f 100644 --- a/pkg/controller/logstash/stackmon/sidecar_test.go +++ b/pkg/controller/logstash/stackmon/sidecar_test.go @@ -99,8 +99,8 @@ func TestWithMonitoring(t *testing.T) { }, containersLength: 2, esEnvVarsLength: 0, - podVolumesLength: 2, - metricsVolumeMountsLength: 2, + podVolumesLength: 3, + metricsVolumeMountsLength: 3, }, { name: "with logs monitoring", @@ -112,8 +112,8 @@ func TestWithMonitoring(t *testing.T) { }, containersLength: 2, esEnvVarsLength: 1, - podVolumesLength: 3, - logVolumeMountsLength: 3, + podVolumesLength: 4, + logVolumeMountsLength: 4, }, { name: "with metrics and logs monitoring", @@ -126,9 +126,9 @@ func TestWithMonitoring(t *testing.T) { }, containersLength: 3, esEnvVarsLength: 1, - podVolumesLength: 4, - metricsVolumeMountsLength: 2, - logVolumeMountsLength: 3, + podVolumesLength: 6, + metricsVolumeMountsLength: 3, + logVolumeMountsLength: 4, }, { name: "with metrics and logs monitoring with different es ref", @@ -141,9 +141,9 @@ func TestWithMonitoring(t *testing.T) { }, containersLength: 3, esEnvVarsLength: 1, - podVolumesLength: 5, - metricsVolumeMountsLength: 2, - logVolumeMountsLength: 3, + podVolumesLength: 7, + metricsVolumeMountsLength: 3, + logVolumeMountsLength: 4, }, } From 0b7a58a770272bd7dea95a1aadee436cd0c930ae Mon Sep 17 00:00:00 2001 From: Rob Bavey Date: Tue, 21 Mar 2023 16:00:10 -0400 Subject: [PATCH 08/26] First pass --- .../logstash.asciidoc | 194 ++++++++++++++++++ 1 file changed, 194 insertions(+) create mode 100644 docs/orchestrating-elastic-stack-applications/logstash.asciidoc diff --git a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc new file mode 100644 index 0000000000..cd33aad0b4 --- /dev/null +++ b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc @@ -0,0 +1,194 @@ +:page_id: logstash +:agent_recipes: https://raw.githubusercontent.com/elastic/cloud-on-k8s/{eck_release_branch}/config/recipes/logstash +ifdef::env-github[] +**** +link:https://www.elastic.co/guide/en/cloud-on-k8s/master/k8s-{page_id}.html[View this document on the Elastic website] +**** 
+endif::[] +[id="{p}-{page_id}"] += Run Logstash on ECK + +This section describes how to configure and deploy Logstash with ECK. + +* <<{p}-logstash-quickstart,Quickstart>> +* <<{p}-logstash-configuration,Configuration>> +* <<{p}-logstash-configuration-examples,Configuration examples>> + +NOTE: Running Logstash on ECK is compatible only with Logstash 8.7+. + +[id="{p}-logstash-quickstart"] +== Quickstart + +. Apply the following specification to deploy Elastic Agent with the System metrics integration to harvest CPU metrics from the Agent Pods. ECK automatically configures the secured connection to an Elasticsearch cluster named `quickstart`, created in the link:k8s-quickstart.html[Elasticsearch quickstart]. ++ +[source,yaml,subs="attributes,+macros,callouts"] +---- +cat $$<<$$EOF | kubectl apply -f - +apiVersion: agent.k8s.elastic.co/v1alpha1 +kind: Agent +metadata: + name: quickstart +spec: + version: {version} + elasticsearchRefs: + - name: quickstart + EOF +---- ++ +Check <<{p}-logstash-configuration-examples>> for more ready-to-use manifests. + +. Monitor the status of Logstash ++ +[source,sh] +---- +kubectl get logstash +---- ++ +[source,sh,subs="attributes"] +---- +NAME AVAILABLE EXPECTED AGE VERSION +quickstart 3 3 4s {version} +---- + +. List all the Pods that belong to a given Logstash specification. ++ +[source,sh] +---- +kubectl get pods --selector='logstash.k8s.elastic.co/name=quickstart' +---- ++ +[source,sh] +---- +NAME READY STATUS RESTARTS AGE +quickstart-sample-ls-0 1/1 Running 0 91s +quickstart-sample-ls-1 1/1 Running 0 91s +quickstart-sample-ls-2 1/1 Running 0 91s +---- + +. Access logs for one of the Pods. ++ +[source,sh] +---- +kubectl logs -f quickstart-sample-ls-0 +---- + +== Configuration + + +[id="{p}-logstash-upgrade-specification"] +=== Upgrade the Logstash specification + +You can upgrade the Logstash version or change settings by editing the YAML specification. ECK applies the changes by performing a rolling restart of Logstash's Pods. + +[id="{p}-logstash-custom-configuration"] +=== Customize the Logstash configuration + +The Logstash configuration (equivalent to logstash.yml) is defined in the `config` element: + +[source,yaml,subs="attributes,+macros,callouts"] +---- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: quickstart +spec: + version: {version} + elasticsearchRefs: + - name: quickstart + config: +---- + +Alternatively, it can be provided through a Secret specified in the `configRef` element. The Secret must have an `logstash.yml` entry with this configuration: +[source,yaml,subs="attributes,+macros"] +---- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: quickstart +spec: + version: {version} + elasticsearchRefs: + - name: quickstart + configRef: + secretName: quickstart-config +--- +apiVersion: v1 +kind: Secret +metadata: + name: quickstart-config +stringData: + logstash.yml: |- + +---- + + +[id="{p}-logstash-pipelines"] +=== Adding Logstash Pipelines + + +Elastic Agent supports the use of multiple outputs. Therefore, the `elasticsearchRefs` element accepts multiple references to Elasticsearch clusters. ECK populates the outputs section of the Elastic Agent configuration based on those references. If you configure more than one output, you also have to specify a unique `outputName` attribute. 
+ +To send Elastic Agent's internal monitoring and log data to a different Elasticsearch cluster called `agent-monitoring` in the `elastic-monitoring` namespace, and the harvested metrics to our `quickstart` cluster, you have to define two `elasticsearchRefs` as shown in the following example: + +[source,yaml,subs="attributes,+macros"] +---- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: quickstart +spec: + version: {version} + elasticsearchRefs: + - name: quickstart + outputName: default + - name: agent-monitoring + namespace: elastic-monitoring + outputName: monitoring +... +---- + +[id="{p}-logstash-connect-es"] +=== Customize the connection to an Elasticsearch cluster + +The `elasticsearchRefs` element allows ECK to automatically configure Elastic Agent to establish a secured connection to one or more managed Elasticsearch clusters. By default, it targets all nodes in your cluster. If you want to direct traffic to specific nodes of your Elasticsearch cluster, refer to <<{p}-traffic-splitting>> for more information and examples. + +Check <<{p}-compute-resources-beats-agent>> for more information on how to use the Pod template to adjust the resources given to Elastic Agent. + + +[id="{p}-logstash-configuration-examples"] +== Configuration examples + +This section contains manifests that illustrate common use cases, and can be your starting point in exploring Logstash deployed with ECK. These manifests are self-contained and work out-of-the-box on any non-secured Kubernetes cluster. They all contain a three-node Elasticsearch cluster and a single Kibana instance. + +CAUTION: The examples in this section are for illustration purposes only and should not be considered to be production-ready. Some of these examples use the `node.store.allow_mmap: false` setting which has performance implications and should be tuned for production workloads, as described in <<{p}-virtual-memory>>. + + +=== System integration + +[source,sh,subs="attributes"] +---- +kubectl apply -f {agent_recipes}/system-integration.yaml +---- + +Deploys Logstash with a single pipeline + +=== Kubernetes integration + +[source,sh,subs="attributes"] +---- +kubectl apply -f {agent_recipes}/kubernetes-integration.yaml +---- + +Deploys Logstash with multiple pipelines + +=== Elasticsearch and Kibana Stack Monitoring + +[source,sh,subs="attributes"] +---- +kubectl apply -f {beats_url}/stack_monitoring.yaml +---- + +Deploys Metricbeat configured for Elasticsearch and Kibana link:https://www.elastic.co/guide/en/kibana/current/xpack-monitoring.html[Stack Monitoring] and Filebeat using autodiscover. Deploys one monitored Elasticsearch cluster and one monitoring Elasticsearch cluster. You can access the Stack Monitoring app in the monitoring cluster's Kibana. + +NOTE: In this example, TLS verification is disabled when Metricbeat communicates with the monitored cluster, which is not secure and should not be used in production. To solve this, use custom certificates and configure Metricbeat to verify them. 
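As a side note for readers who script against the cluster in Go rather than with kubectl, the Pod listing step in the quickstart above (selector `logstash.k8s.elastic.co/name=quickstart`) can be sketched with controller-runtime, the client library used throughout this repository. This is an illustrative sketch, not part of the documented workflow; the `default` namespace and the `quickstart` resource name are assumptions taken from the quickstart.

[source,go]
----
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
	// kubeconfig or in-cluster configuration
	cfg, err := config.GetConfig()
	if err != nil {
		panic(err)
	}
	c, err := client.New(cfg, client.Options{})
	if err != nil {
		panic(err)
	}

	// list the Pods owned by the "quickstart" Logstash resource
	var pods corev1.PodList
	if err := c.List(context.Background(), &pods,
		client.InNamespace("default"),
		client.MatchingLabels{"logstash.k8s.elastic.co/name": "quickstart"},
	); err != nil {
		panic(err)
	}
	for _, p := range pods.Items {
		fmt.Println(p.Name, p.Status.Phase)
	}
}
----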
+ From 4f1b10219a762b8e6d06d94c5c030558150da296 Mon Sep 17 00:00:00 2001 From: Rob Bavey Date: Fri, 24 Mar 2023 09:08:47 -0400 Subject: [PATCH 09/26] Added more logstash docs --- .../advanced-topics/stack-monitoring.asciidoc | 19 +- .../logstash.asciidoc | 196 ++++++++++++++++-- docs/supported-versions.asciidoc | 1 + 3 files changed, 195 insertions(+), 21 deletions(-) diff --git a/docs/advanced-topics/stack-monitoring.asciidoc b/docs/advanced-topics/stack-monitoring.asciidoc index cded98a268..b541da6e10 100644 --- a/docs/advanced-topics/stack-monitoring.asciidoc +++ b/docs/advanced-topics/stack-monitoring.asciidoc @@ -9,7 +9,7 @@ endif::[] = Stack Monitoring You can enable link:https://www.elastic.co/guide/en/elasticsearch/reference/current/monitor-elasticsearch-cluster.html[Stack Monitoring] -on Elasticsearch, Kibana and Beats to collect and ship their metrics and logs to a dedicated monitoring cluster. +on Elasticsearch, Kibana, Beats and Logstash to collect and ship their metrics and logs to a dedicated monitoring cluster. To enable Stack Monitoring, simply reference the monitoring Elasticsearch cluster in the `spec.monitoring` section of their specification. @@ -74,11 +74,28 @@ spec: elasticsearchRefs: - name: monitoring namespace: observability <3> +--- +apiVersion: logstash.k8s.elastic.co/v1beta1 +kind: Logstash +metadata: + name: monitored-sample +spec: + version: {version} + monitoring: + metrics: + elasticsearchRefs: + - name: monitoring + namespace: observability <4> + logs: + elasticsearchRefs: + - name: monitoring + namespace: observability <4> ---- <1> The use of `namespace` is optional if the monitoring Elasticsearch cluster and the monitored Elasticsearch cluster are running in the same namespace. <2> The use of `namespace` is optional if the Elasticsearch cluster and the Kibana instance are running in the same namespace. <3> The use of `namespace` is optional if the Elasticsearch cluster and the Beats instance are running in the same namespace. +<4> The use of `namespace` is optional if the Elasticsearch cluster and the Logstash instances are running in the same namespace. NOTE: You can configure an Elasticsearch cluster to monitor itself. diff --git a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc index cd33aad0b4..85958fd1ce 100644 --- a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc @@ -1,5 +1,5 @@ :page_id: logstash -:agent_recipes: https://raw.githubusercontent.com/elastic/cloud-on-k8s/{eck_release_branch}/config/recipes/logstash +:logstash_recipes: https://raw.githubusercontent.com/elastic/cloud-on-k8s/{eck_release_branch}/config/recipes/logstash ifdef::env-github[] **** link:https://www.elastic.co/guide/en/cloud-on-k8s/master/k8s-{page_id}.html[View this document on the Elastic website] @@ -8,31 +8,66 @@ endif::[] [id="{p}-{page_id}"] = Run Logstash on ECK +experimental[] + This section describes how to configure and deploy Logstash with ECK. * <<{p}-logstash-quickstart,Quickstart>> * <<{p}-logstash-configuration,Configuration>> * <<{p}-logstash-configuration-examples,Configuration examples>> +* <<{p}-logstash-technical-preview-limitations,Technical Preview Limitations>> + NOTE: Running Logstash on ECK is compatible only with Logstash 8.7+. + [id="{p}-logstash-quickstart"] == Quickstart + . 
Apply the following specification to deploy Elastic Agent with the System metrics integration to harvest CPU metrics from the Agent Pods. ECK automatically configures the secured connection to an Elasticsearch cluster named `quickstart`, created in the link:k8s-quickstart.html[Elasticsearch quickstart]. + [source,yaml,subs="attributes,+macros,callouts"] ---- cat $$<<$$EOF | kubectl apply -f - -apiVersion: agent.k8s.elastic.co/v1alpha1 -kind: Agent +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash metadata: name: quickstart spec: - version: {version} + count: 3 + version: 8.6.1 + config: elasticsearchRefs: - - name: quickstart - EOF + - clusterName: quickstart + name: quickstart + pipelines: + - pipeline.id: main + config.string: | + input { + beats { + port => 5044 + } + } + output { + elasticsearch { + hosts => [ "${QUICKSTART_ES_HOSTS}" ] + user => "${QUICKSTART_ES_USER}" + password => "${QUICKSTART_ES_PASSWORD}" + cacert => "${QUICKSTART_ES_CA_CERTS}" + } + } + services: + - name: beats + service: + spec: + type: ClusterIP + ports: + - port: 5044 + name: "filebeat" + protocol: TCP + targetPort: 5044 +EOF ---- + Check <<{p}-logstash-configuration-examples>> for more ready-to-use manifests. @@ -59,22 +94,21 @@ kubectl get pods --selector='logstash.k8s.elastic.co/name=quickstart' + [source,sh] ---- -NAME READY STATUS RESTARTS AGE -quickstart-sample-ls-0 1/1 Running 0 91s -quickstart-sample-ls-1 1/1 Running 0 91s -quickstart-sample-ls-2 1/1 Running 0 91s +NAME READY STATUS RESTARTS AGE +quickstart-ls-0 1/1 Running 0 91s +quickstart-ls-1 1/1 Running 0 91s +quickstart-ls-2 1/1 Running 0 91s ---- . Access logs for one of the Pods. + [source,sh] ---- -kubectl logs -f quickstart-sample-ls-0 +kubectl logs -f quickstart-ls-0 ---- == Configuration - [id="{p}-logstash-upgrade-specification"] === Upgrade the Logstash specification @@ -118,17 +152,112 @@ metadata: name: quickstart-config stringData: logstash.yml: |- - ---- [id="{p}-logstash-pipelines"] === Adding Logstash Pipelines +Logstash pipelines (equivalent to pipelines.yml) are defined in the `pipelines` element: + +[source,yaml,subs="attributes,+macros,callouts"] +---- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: quickstart +spec: + version: {version} + elasticsearchRefs: + - name: quickstart + pipelines: + - pipeline.id: main + config.string: | + input { + beats { + port => 5044 + } + } + output { + elasticsearch { + hosts => [ "${QUICKSTART_ES_HOSTS}" ] + user => "${QUICKSTART_ES_USER}" + password => "${QUICKSTART_ES_PASSWORD}" + cacert => "${QUICKSTART_ES_CA_CERTS}" + } + } +---- + +Alternatively, it can be provided through a Secret specified in the `configRef` element. The Secret must have an `logstash.yml` entry with this configuration: +[source,yaml,subs="attributes,+macros"] +---- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: quickstart +spec: + version: {version} + elasticsearchRefs: + - name: quickstart + pipelinesRef: + secretName: quickstart-pipeline +--- +apiVersion: v1 +kind: Secret +metadata: + name: quickstart-pipeline +stringData: + pipelines.yml: |- + - pipeline.id: main + config.string: | + input { + beats { + port => 5044 + } + } + output { + elasticsearch { + hosts => [ "${QUICKSTART_ES_HOSTS}" ] + user => "${QUICKSTART_ES_USER}" + password => "${QUICKSTART_ES_PASSWORD}" + cacert => "${QUICKSTART_ES_CA_CERTS}" + } + } + +---- -Elastic Agent supports the use of multiple outputs. 
Therefore, the `elasticsearchRefs` element accepts multiple references to Elasticsearch clusters. ECK populates the outputs section of the Elastic Agent configuration based on those references. If you configure more than one output, you also have to specify a unique `outputName` attribute. +Logstash on ECK supports all options in `pipelines.yml`, including settings to update the number of workers, and + the size of the batch that the pipeline will process. This also includes using `path.config` to point to volumes + mounted on the logstash container: -To send Elastic Agent's internal monitoring and log data to a different Elasticsearch cluster called `agent-monitoring` in the `elastic-monitoring` namespace, and the harvested metrics to our `quickstart` cluster, you have to define two `elasticsearchRefs` as shown in the following example: +[source,yaml,subs="attributes,+macros"] +---- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: quickstart +spec: + version: {version} + count: 1 + pipelines: + - pipeline.id: external + pipeline.workers: 4 + path.config: /usr/share/logstash/config/pipelines + podTemplate: + spec: + containers: + - name: logstash + volumeMounts: + - mountPath: /usr/share/logstash/config/pipelines + name: pipelines + readOnly: true + volumes: + - name: pipelines + hostPath: + path: /home/logstash-dev/logstash/pipelines +---- + +NOTE: Persistent Queues and Dead Letter Queues will be supported in a later release, but are not currently suppored. [source,yaml,subs="attributes,+macros"] ---- @@ -163,20 +292,20 @@ This section contains manifests that illustrate common use cases, and can be you CAUTION: The examples in this section are for illustration purposes only and should not be considered to be production-ready. Some of these examples use the `node.store.allow_mmap: false` setting which has performance implications and should be tuned for production workloads, as described in <<{p}-virtual-memory>>. -=== System integration +=== Single Pipeline [source,sh,subs="attributes"] ---- -kubectl apply -f {agent_recipes}/system-integration.yaml +kubectl apply -f {logstash_recipes}/single-pipeline.yaml ---- Deploys Logstash with a single pipeline -=== Kubernetes integration +=== Multiple Pipelines [source,sh,subs="attributes"] ---- -kubectl apply -f {agent_recipes}/kubernetes-integration.yaml +kubectl apply -f {logstash_recipes}/multiple-pipelines.yaml ---- Deploys Logstash with multiple pipelines @@ -185,10 +314,37 @@ Deploys Logstash with multiple pipelines [source,sh,subs="attributes"] ---- -kubectl apply -f {beats_url}/stack_monitoring.yaml +kubectl apply -f {logstash_recipes}/stack_monitoring.yaml ---- Deploys Metricbeat configured for Elasticsearch and Kibana link:https://www.elastic.co/guide/en/kibana/current/xpack-monitoring.html[Stack Monitoring] and Filebeat using autodiscover. Deploys one monitored Elasticsearch cluster and one monitoring Elasticsearch cluster. You can access the Stack Monitoring app in the monitoring cluster's Kibana. NOTE: In this example, TLS verification is disabled when Metricbeat communicates with the monitored cluster, which is not secure and should not be used in production. To solve this, use custom certificates and configure Metricbeat to verify them. +[id="{p}-logstash-on-eck-limitations"] +== Logstash on ECK Limitations + +* When running Logstash on ECK, it is important to understand how data is sent into Logstash when determining replica counts of pods. 
Pipelines that include plugins that need to store state, or that cannot automatically distribute work across Pods, should be treated with care, as data loss and/or duplication can result from an incorrectly configured setup.
+* To that end, the technical preview should only be run with the limited set of input plugins listed in the next section.
+
+[id="{p}-logstash-technical-preview-limitations"]
+== Technical Preview Limitations
+
+Note that this release is a technical preview: it is still under active development and has limited functionality.
+Limitations include, but are not limited to:
+
+* Limited support for plugins:
+** Input plugins: logstash-input-azure_event_hubs, logstash-input-beats, logstash-input-elastic_agent, logstash-input-kafka, logstash-input-tcp, logstash-input-http
+** While most filter plugins are supported, the following plugins are not currently supported:
+*** logstash-filter-jdbc_static, logstash-filter-jdbc_streaming, logstash-filter-aggregate
+*** Other filters may require additional manual work to mount volumes
+** While most output plugins are supported, the following plugins are not currently supported, or may require manual work to be operational:
+*** logstash-output-s3 - requires a volume mount to store in-progress work to avoid data loss
+*** logstash-output-jms - requires JAR files to be placed on the Logstash classpath
+
+
+* No support for persistence
+* `ElasticsearchRef` implementation in plugins in preview mode
+** In preview mode, plugins need to be configured manually with the environment variables populated by the Logstash operator (a naming sketch follows at the end of this patch).
+* No `ElasticsearchRef` support for Pipeline Central Management
+** Manual configuration required in `Config`/`ConfigRef`
diff --git a/docs/supported-versions.asciidoc b/docs/supported-versions.asciidoc
index f4021a995b..0e17228475 100644
--- a/docs/supported-versions.asciidoc
+++ b/docs/supported-versions.asciidoc
@@ -7,6 +7,7 @@
 * Beats: 7.0+, 8+
 * Elastic Agent: 7.10+ (standalone), 7.14+ (Fleet), 8+
 * Elastic Maps Server: 7.11+, 8+
+* Logstash: 8.7+
 
 ECK should work with all conformant installers as listed in these link:https://github.com/cncf/k8s-conformance/blob/master/faq.md#what-is-a-distribution-hosted-platform-and-an-installer[FAQs].
 Distributions include source patches and so may not work as-is with ECK.
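The `ElasticsearchRef` preview limitation above means that, for now, pipeline definitions reference connection details through environment variables such as `QUICKSTART_ES_HOSTS` in the examples of this document. The Go sketch below reproduces the naming pattern visible in those examples (upper-cased `clusterName` or resource name, dashes replaced by underscores, plus fixed `_ES_*` suffixes); it is an illustration of the convention used in these docs, not operator code, and the helper name `esEnvVars` is made up for this example.

[source,go]
----
package main

import (
	"fmt"
	"strings"
)

// esEnvVars derives the environment variable names used by the examples in
// this document for a given elasticsearchRefs cluster name,
// e.g. "quickstart" -> QUICKSTART_ES_HOSTS. Illustrative sketch only.
func esEnvVars(clusterName string) []string {
	prefix := strings.ToUpper(strings.ReplaceAll(clusterName, "-", "_"))
	suffixes := []string{"_ES_HOSTS", "_ES_USER", "_ES_PASSWORD", "_ES_CA_CERTS"}
	vars := make([]string, 0, len(suffixes))
	for _, s := range suffixes {
		vars = append(vars, prefix+s)
	}
	return vars
}

func main() {
	fmt.Println(esEnvVars("quickstart")) // [QUICKSTART_ES_HOSTS QUICKSTART_ES_USER QUICKSTART_ES_PASSWORD QUICKSTART_ES_CA_CERTS]
	fmt.Println(esEnvVars("prod-es"))    // [PROD_ES_ES_HOSTS PROD_ES_ES_USER PROD_ES_ES_PASSWORD PROD_ES_ES_CA_CERTS]
}
----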
From b2e00d47c09d8cce5f8753a0e9d5976be8874b1b Mon Sep 17 00:00:00 2001 From: Rob Bavey Date: Fri, 24 Mar 2023 14:44:41 -0400 Subject: [PATCH 10/26] Add Logstash Recipes Still a work in progress - will need elasticsearchRef to work fully --- config/recipes/logstash/logstash-eck.yaml | 119 ++++++++++++++ .../logstash/logstash-pipelinesecret.yaml | 129 +++++++++++++++ .../logstash/logstash-pipelinevolume.yaml | 142 +++++++++++++++++ config/recipes/logstash/stack_monitoring.yaml | 149 ++++++++++++++++++ .../logstash.asciidoc | 46 ++++-- 5 files changed, 568 insertions(+), 17 deletions(-) create mode 100644 config/recipes/logstash/logstash-eck.yaml create mode 100644 config/recipes/logstash/logstash-pipelinesecret.yaml create mode 100644 config/recipes/logstash/logstash-pipelinevolume.yaml create mode 100644 config/recipes/logstash/stack_monitoring.yaml diff --git a/config/recipes/logstash/logstash-eck.yaml b/config/recipes/logstash/logstash-eck.yaml new file mode 100644 index 0000000000..407cae1fde --- /dev/null +++ b/config/recipes/logstash/logstash-eck.yaml @@ -0,0 +1,119 @@ +--- +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: elasticsearch + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: elasticsearch +spec: + version: 8.6.1 + nodeSets: + - name: default + count: 3 + config: + # This setting has performance implications. See the README for more details. + node.store.allow_mmap: false +--- +apiVersion: kibana.k8s.elastic.co/v1 +kind: Kibana +metadata: + name: kibana + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: kibana +spec: + version: 8.6.1 + count: 1 + elasticsearchRef: + name: elasticsearch +--- +apiVersion: beat.k8s.elastic.co/v1beta1 +kind: Beat +metadata: + name: filebeat + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: filebeat +spec: + type: filebeat + version: 8.6.1 + config: + filebeat.inputs: + - type: log + paths: + - /data/logstash-tutorial.log + output.logstash: + hosts: ["logstash-ls-api.default.svc:5044"] + deployment: + podTemplate: + metadata: + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: filebeat + spec: + automountServiceAccountToken: true + initContainers: + - name: download-tutorial + image: curlimages/curl + command: ["/bin/sh"] + args: ["-c", "curl -L https://download.elastic.co/demos/logstash/gettingstarted/logstash-tutorial.log.gz | gunzip -c > /data/logstash-tutorial.log"] + volumeMounts: + - name: data + mountPath: /data + containers: + - name: filebeat + volumeMounts: + - name: data + mountPath: /data + - name: beat-data + mountPath: /usr/share/filebeat/data + volumes: + - name: data + emptydir: {} + - name: beat-data + emptydir: {} +--- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: logstash + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: logstash +spec: + count: 1 + version: 8.6.1 +# elasticsearchRefs: +# - clusterName: eck +# name: elasticsearch + pipelines: + - pipeline.id: main + config.string: | + input { + beats { + port => 5044 + } + } + filter { + grok { + match => { "message" => "%{HTTPD_COMMONLOG}"} + } + geoip { + source => "[source][address]" + target => "[source]" + } + } + output { + stdout { codec => rubydebug } + } + services: + - name: beats + service: + spec: + type: ClusterIP + ports: + - port: 5044 + name: "filebeat" + protocol: TCP + targetPort: 5044 diff --git 
a/config/recipes/logstash/logstash-pipelinesecret.yaml b/config/recipes/logstash/logstash-pipelinesecret.yaml new file mode 100644 index 0000000000..697d879b72 --- /dev/null +++ b/config/recipes/logstash/logstash-pipelinesecret.yaml @@ -0,0 +1,129 @@ +--- +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: elasticsearch + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: elasticsearch +spec: + version: 8.6.1 + nodeSets: + - name: default + count: 3 + config: + # This setting has performance implications. See the README for more details. + node.store.allow_mmap: false +--- +apiVersion: kibana.k8s.elastic.co/v1 +kind: Kibana +metadata: + name: kibana + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: kibana +spec: + version: 8.6.1 + count: 1 + elasticsearchRef: + name: elasticsearch +--- +apiVersion: beat.k8s.elastic.co/v1beta1 +kind: Beat +metadata: + name: filebeat + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: filebeat +spec: + type: filebeat + version: 8.6.1 + config: + filebeat.inputs: + - type: log + paths: + - /data/logstash-tutorial.log + output.logstash: + hosts: ["logstash-ls-api.default.svc:5044"] + deployment: + podTemplate: + metadata: + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: filebeat + spec: + automountServiceAccountToken: true + initContainers: + - name: download-tutorial + image: curlimages/curl + command: ["/bin/sh"] + args: ["-c", "curl -L https://download.elastic.co/demos/logstash/gettingstarted/logstash-tutorial.log.gz | gunzip -c > /data/logstash-tutorial.log"] + volumeMounts: + - name: data + mountPath: /data + containers: + - name: filebeat + volumeMounts: + - name: data + mountPath: /data + - name: beat-data + mountPath: /usr/share/filebeat/data + volumes: + - name: data + emptydir: {} + - name: beat-data + emptydir: {} +--- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: logstash + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: logstash +spec: + count: 1 + version: 8.6.1 +# elasticsearchRefs: +# - clusterName: eck +# name: elasticsearch + pipelinesRef: + secretName: logstash-pipeline + services: + - name: beats + service: + spec: + type: ClusterIP + ports: + - port: 5044 + name: "filebeat" + protocol: TCP + targetPort: 5044 +--- +apiVersion: v1 +kind: Secret +metadata: + name: logstash-pipeline + labels: + app.kubernetes.io/name: eck-logstash +stringData: + pipelines.yml: |- + - pipeline.id: main + config.string: | + input { + beats { + port => 5044 + } + } + filter { + grok { + match => { "message" => "%{HTTPD_COMMONLOG}"} + } + geoip { + source => "[source][address]" + target => "[source]" + } + } + output { + stdout { codec => rubydebug } + } diff --git a/config/recipes/logstash/logstash-pipelinevolume.yaml b/config/recipes/logstash/logstash-pipelinevolume.yaml new file mode 100644 index 0000000000..eab23dc81d --- /dev/null +++ b/config/recipes/logstash/logstash-pipelinevolume.yaml @@ -0,0 +1,142 @@ +--- +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: elasticsearch + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: elasticsearch +spec: + version: 8.6.1 + nodeSets: + - name: default + count: 3 + config: + # This setting has performance implications. See the README for more details. 
+ node.store.allow_mmap: false +--- +apiVersion: kibana.k8s.elastic.co/v1 +kind: Kibana +metadata: + name: kibana + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: kibana +spec: + version: 8.6.1 + count: 1 + elasticsearchRef: + name: elasticsearch +--- +apiVersion: beat.k8s.elastic.co/v1beta1 +kind: Beat +metadata: + name: filebeat + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: filebeat +spec: + type: filebeat + version: 8.6.1 + config: + filebeat.inputs: + - type: log + paths: + - /data/logstash-tutorial.log + output.logstash: + hosts: ["logstash-ls-api.default.svc:5044"] + deployment: + podTemplate: + metadata: + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: filebeat + spec: + automountServiceAccountToken: true + initContainers: + - name: download-tutorial + image: curlimages/curl + command: ["/bin/sh"] + args: ["-c", "curl -L https://download.elastic.co/demos/logstash/gettingstarted/logstash-tutorial.log.gz | gunzip -c > /data/logstash-tutorial.log"] + volumeMounts: + - name: data + mountPath: /data + containers: + - name: filebeat + volumeMounts: + - name: data + mountPath: /data + - name: beat-data + mountPath: /usr/share/filebeat/data + volumes: + - name: data + emptydir: {} + - name: beat-data + emptydir: {} +--- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: logstash + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: logstash +spec: + count: 1 + version: 8.6.1 +# elasticsearchRefs: +# - clusterName: eck +# name: elasticsearch + pipelines: + - pipeline.id: main + path.config: /usr/share/logstash/config/pipelines + services: + - name: beats + service: + spec: + type: ClusterIP + ports: + - port: 5044 + name: "filebeat" + protocol: TCP + targetPort: 5044 + podTemplate: + spec: + volumes: + - name: logstash-pipeline + secret: + secretName: logstash-pipeline + containers: + - name: logstash + volumeMounts: + - mountPath: /usr/share/logstash/config/pipelines + name: logstash-pipeline + readOnly: true +--- +apiVersion: v1 +kind: Secret +metadata: + name: logstash-pipeline + labels: + app.kubernetes.io/name: eck-logstash +stringData: + input.conf: |- + input { + beats { + port => 5044 + } + } + filters.conf: |- + filter { + grok { + match => { "message" => "%{HTTPD_COMMONLOG}"} + } + geoip { + source => "[source][address]" + target => "[source]" + } + } + output.conf: |- + output { + stdout { codec => rubydebug } + } \ No newline at end of file diff --git a/config/recipes/logstash/stack_monitoring.yaml b/config/recipes/logstash/stack_monitoring.yaml new file mode 100644 index 0000000000..6edc637409 --- /dev/null +++ b/config/recipes/logstash/stack_monitoring.yaml @@ -0,0 +1,149 @@ +--- +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: elasticsearch + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: elasticsearch +spec: + version: 8.6.1 + nodeSets: + - name: default + count: 3 + config: + # This setting has performance implications. See the README for more details. 
+ node.store.allow_mmap: false +--- +apiVersion: kibana.k8s.elastic.co/v1 +kind: Kibana +metadata: + name: kibana + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: kibana +spec: + version: 8.6.1 + count: 1 + elasticsearchRef: + name: elasticsearch +--- +apiVersion: beat.k8s.elastic.co/v1beta1 +kind: Beat +metadata: + name: filebeat + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: filebeat +spec: + type: filebeat + version: 8.6.1 + config: + filebeat.inputs: + - type: log + paths: + - /data/logstash-tutorial.log + output.logstash: + hosts: ["logstash-ls-api.default.svc:5044"] + deployment: + podTemplate: + metadata: + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: filebeat + spec: + automountServiceAccountToken: true + initContainers: + - name: download-tutorial + image: curlimages/curl + command: ["/bin/sh"] + args: ["-c", "curl -L https://download.elastic.co/demos/logstash/gettingstarted/logstash-tutorial.log.gz | gunzip -c > /data/logstash-tutorial.log"] + volumeMounts: + - name: data + mountPath: /data + containers: + - name: filebeat + volumeMounts: + - name: data + mountPath: /data + - name: beat-data + mountPath: /usr/share/filebeat/data + volumes: + - name: data + emptydir: {} + - name: beat-data + emptydir: {} +--- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: logstash + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: logstash +spec: + count: 1 + version: 8.6.1 + # elasticsearchRefs: + # - clusterName: eck + # name: elasticsearch + monitoring: + metrics: + elasticsearchRefs: + - name: elasticsearch-monitoring + pipelines: + - pipeline.id: main + config.string: | + input { + beats { + port => 5044 + } + } + filter { + grok { + match => { "message" => "%{HTTPD_COMMONLOG}"} + } + geoip { + source => "[source][address]" + target => "[source]" + } + } + output { + stdout { codec => rubydebug } + } + services: + - name: beats + service: + spec: + type: ClusterIP + ports: + - port: 5044 + name: "filebeat" + protocol: TCP + targetPort: 5044 +--- +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: elasticsearch-monitoring +spec: + version: 8.6.1 + nodeSets: + - name: default + count: 3 + config: + node.store.allow_mmap: false +--- +apiVersion: kibana.k8s.elastic.co/v1 +kind: Kibana +metadata: + name: kibana-monitoring +spec: + version: 8.6.1 + count: 1 + elasticsearchRef: + name: elasticsearch-monitoring + config: + # enable the UI to reflect container level CPU usage, only displays info if CPU limits are set on the monitored ES cluster + # https://www.elastic.co/guide/en/kibana/current/monitoring-settings-kb.html + monitoring.ui.container.elasticsearch.enabled: true \ No newline at end of file diff --git a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc index 85958fd1ce..3cb5256170 100644 --- a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc @@ -39,8 +39,8 @@ spec: version: 8.6.1 config: elasticsearchRefs: - - clusterName: quickstart - name: quickstart +# - clusterName: quickstart +# name: quickstart pipelines: - pipeline.id: main config.string: | @@ -50,12 +50,7 @@ spec: } } output { - elasticsearch { - hosts => [ "${QUICKSTART_ES_HOSTS}" ] - user => "${QUICKSTART_ES_USER}" - password => "${QUICKSTART_ES_PASSWORD}" - cacert => 
"${QUICKSTART_ES_CA_CERTS}" - } + stdout {} } services: - name: beats @@ -292,23 +287,42 @@ This section contains manifests that illustrate common use cases, and can be you CAUTION: The examples in this section are for illustration purposes only and should not be considered to be production-ready. Some of these examples use the `node.store.allow_mmap: false` setting which has performance implications and should be tuned for production workloads, as described in <<{p}-virtual-memory>>. -=== Single Pipeline +=== Single Pipeline defined in CRD [source,sh,subs="attributes"] ---- -kubectl apply -f {logstash_recipes}/single-pipeline.yaml +kubectl apply -f {logstash_recipes}/logstash-eck.yaml ---- -Deploys Logstash with a single pipeline +Deploys Logstash with a single pipeline defined in the CRD -=== Multiple Pipelines +=== Single Pipeline defined in CRD [source,sh,subs="attributes"] ---- -kubectl apply -f {logstash_recipes}/multiple-pipelines.yaml +kubectl apply -f {logstash_recipes}/logstash-eck.yaml ---- -Deploys Logstash with multiple pipelines +Deploys Logstash with a single pipeline defined in the CRD + +=== Single Pipeline defined in secret + +[source,sh,subs="attributes"] +---- +kubectl apply -f {logstash_recipes}/logstash-pipelinesecret.yaml +---- + +Deploys Logstash with a single pipeline defined in a secret, referenced by a `pipelineRef` + +=== Pipeline configuration in mounted volume + +[source,sh,subs="attributes"] +---- +kubectl apply -f {logstash_recipes}/logstash-pipelinevolume.yaml +---- + +Deploys Logstash with a single pipeline defined in a secret, mounted as a volume, and referenced by +`path.config` === Elasticsearch and Kibana Stack Monitoring @@ -317,9 +331,7 @@ Deploys Logstash with multiple pipelines kubectl apply -f {logstash_recipes}/stack_monitoring.yaml ---- -Deploys Metricbeat configured for Elasticsearch and Kibana link:https://www.elastic.co/guide/en/kibana/current/xpack-monitoring.html[Stack Monitoring] and Filebeat using autodiscover. Deploys one monitored Elasticsearch cluster and one monitoring Elasticsearch cluster. You can access the Stack Monitoring app in the monitoring cluster's Kibana. - -NOTE: In this example, TLS verification is disabled when Metricbeat communicates with the monitored cluster, which is not secure and should not be used in production. To solve this, use custom certificates and configure Metricbeat to verify them. +Deploys an Elasticsearch and Kibana monitoring cluster, and a Logstash that will send it's monitoring information to this cluster. 
You can view the stack monitoring information in the monitoring cluster's Kibana [id="{p}-logstash-on-eck-limitations"] == Logstash on ECK Limitations From 90683afb55c20a5e56a547affbcd172d0be24cda Mon Sep 17 00:00:00 2001 From: Rob Bavey Date: Fri, 24 Mar 2023 15:34:57 -0400 Subject: [PATCH 11/26] Tidy up readme --- config/recipes/logstash/README.asciidoc | 20 +++++++++++++++++-- ...onitoring.yaml => logstash-monitored.yaml} | 0 .../logstash.asciidoc | 4 +--- 3 files changed, 19 insertions(+), 5 deletions(-) rename config/recipes/logstash/{stack_monitoring.yaml => logstash-monitored.yaml} (100%) diff --git a/config/recipes/logstash/README.asciidoc b/config/recipes/logstash/README.asciidoc index 65775b5ad5..eda4895ef6 100644 --- a/config/recipes/logstash/README.asciidoc +++ b/config/recipes/logstash/README.asciidoc @@ -8,7 +8,23 @@ endif::[] = Using Logstash with ECK -This recipe demonstrates how to run the link:https://www.elastic.co/guide/en/logstash/current/advanced-pipeline.html[Logstash log parsing example] on Kubernetes with Elasticsearch, Kibana and Filebeat deployed via ECK. +These recipes demonstrate how to run the Logstash, Elasticsearch, Kibana and Filebeat deployed via ECK, using the link:https://www.elastic.co/guide/en/logstash/current/advanced-pipeline.html[Logstash log parsing example] as a starting point +===== Inline Pipeline usage - `logstash-eck.yaml` -CAUTION: This recipe uses the `node.store.allow_mmap: false` configuration value to avoid configuring memory mapping settings on the underlying host. This could have a significant performance impact on your Elasticsearch cluster and should not be used in production without careful consideration. See https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-virtual-memory.html for more information. +Deploys Logstash with the pipeline defined inline in the CRD + +===== Pipeline as secret - `logstash-pipelinesecret.yaml` + +Deploys Logstash with the pipeline defined in a secret and referred to via `pipelinesRef` + +===== Pipeline as mounted volume - `logstash-pipelinevolume.yaml` + +Deploys Logstash with the pipeline details defined in the CRD, and the pipeline itself mounted as a volume + +===== Logstash with Stack Monitoring - `logstash-monitored.yaml` + +Deploys Logstash and a dedicated Elasticsearch and Kibana monitoring cluster, and sends Logstash monitoring data to that cluster. + + +CAUTION: These recipes use the `node.store.allow_mmap: false` configuration value to avoid configuring memory mapping settings on the underlying host. This could have a significant performance impact on your Elasticsearch cluster and should not be used in production without careful consideration. See https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-virtual-memory.html for more information. diff --git a/config/recipes/logstash/stack_monitoring.yaml b/config/recipes/logstash/logstash-monitored.yaml similarity index 100% rename from config/recipes/logstash/stack_monitoring.yaml rename to config/recipes/logstash/logstash-monitored.yaml diff --git a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc index 3cb5256170..1fab822abc 100644 --- a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc @@ -276,8 +276,6 @@ spec: The `elasticsearchRefs` element allows ECK to automatically configure Elastic Agent to establish a secured connection to one or more managed Elasticsearch clusters. 
By default, it targets all nodes in your cluster. If you want to direct traffic to specific nodes of your Elasticsearch cluster, refer to <<{p}-traffic-splitting>> for more information and examples. -Check <<{p}-compute-resources-beats-agent>> for more information on how to use the Pod template to adjust the resources given to Elastic Agent. - [id="{p}-logstash-configuration-examples"] == Configuration examples @@ -328,7 +326,7 @@ Deploys Logstash with a single pipeline defined in a secret, mounted as a volume [source,sh,subs="attributes"] ---- -kubectl apply -f {logstash_recipes}/stack_monitoring.yaml +kubectl apply -f {logstash_recipes}/logstash-monitored.yaml ---- Deploys an Elasticsearch and Kibana monitoring cluster, and a Logstash that will send it's monitoring information to this cluster. You can view the stack monitoring information in the monitoring cluster's Kibana From 9ecb8cd87b5f65245b306bb6afd34f586cf80321 Mon Sep 17 00:00:00 2001 From: Rob Bavey Date: Tue, 28 Mar 2023 10:31:41 -0400 Subject: [PATCH 12/26] Added more doc --- config/recipes/logstash/logstash-multi.yaml | 284 ++++++++++++++++ .../logstash.asciidoc | 315 ++++++++++++++---- 2 files changed, 534 insertions(+), 65 deletions(-) create mode 100644 config/recipes/logstash/logstash-multi.yaml diff --git a/config/recipes/logstash/logstash-multi.yaml b/config/recipes/logstash/logstash-multi.yaml new file mode 100644 index 0000000000..ddef053b1b --- /dev/null +++ b/config/recipes/logstash/logstash-multi.yaml @@ -0,0 +1,284 @@ +--- +kind: Namespace +apiVersion: v1 +metadata: + name: qa + labels: + name: qa +--- +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: qa + namespace: qa + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: elasticsearch +spec: + version: 8.6.1 + nodeSets: + - name: default + count: 3 + config: + # This setting has performance implications. See the README for more details. + node.store.allow_mmap: false +--- +apiVersion: kibana.k8s.elastic.co/v1 +kind: Kibana +metadata: + name: qa + namespace: qa + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: kibana +spec: + version: 8.6.1 + count: 1 + elasticsearchRef: + name: qa +--- +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: production + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: elasticsearch +spec: + version: 8.6.1 + nodeSets: + - name: default + count: 3 + config: + # This setting has performance implications. See the README for more details. 
+ node.store.allow_mmap: false +--- +apiVersion: kibana.k8s.elastic.co/v1 +kind: Kibana +metadata: + name: production + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: kibana +spec: + version: 8.6.1 + count: 1 + elasticsearchRef: + name: production +--- +apiVersion: beat.k8s.elastic.co/v1beta1 +kind: Beat +metadata: + name: filebeat + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: filebeat +spec: + type: filebeat + version: 8.6.1 + config: + filebeat.inputs: + - type: log + paths: + - /data/logstash-tutorial.log + output.logstash: + hosts: ["logstash-ls-api.default.svc:5044"] + deployment: + podTemplate: + metadata: + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: filebeat + spec: + automountServiceAccountToken: true + initContainers: + - name: download-tutorial + image: curlimages/curl + command: ["/bin/sh"] + args: ["-c", "curl -L https://download.elastic.co/demos/logstash/gettingstarted/logstash-tutorial.log.gz | gunzip -c > /data/logstash-tutorial.log"] + volumeMounts: + - name: data + mountPath: /data + containers: + - name: filebeat + volumeMounts: + - name: data + mountPath: /data + - name: beat-data + mountPath: /usr/share/filebeat/data + volumes: + - name: data + emptydir: {} + - name: beat-data + emptydir: {} +--- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: logstash + labels: + app.kubernetes.io/name: eck-logstash + app.kubernetes.io/component: logstash +spec: + count: 1 + version: 8.6.1 + elasticsearchRefs: + - clusterName: prod-es + name: production + - clusterName: qa-es + name: qa + namespace: qa + monitoring: + metrics: + elasticsearchRefs: + - name: elasticsearch-monitoring + pipelines: + - pipeline.id: main + config.string: | + input { + beats { + port => 5044 + } + } + filter { + grok { + match => { "message" => "%{HTTPD_COMMONLOG}"} + } + geoip { + source => "[source][address]" + target => "[source]" + } + } + output { + elasticsearch { + hosts => [ "${PROD_ES_ES_HOSTS}" ] + user => "${PROD_ES_ES_USER}" + password => "${PROD_ES_ES_PASSWORD}" + cacert => "${PROD_ES_ES_CA_CERTS}" + } + elasticsearch { + hosts => [ "${QA_ES_ES_HOSTS}" ] + user => "${QA_ES_ES_USER}" + password => "${QA_ES_ES_PASSWORD}" + cacert => "${QA_ES_ES_CA_CERTS}" + } + } + - pipeline.id: main + config.string: | + input { + beats { + port => 5044 + } + } + filter { + grok { + match => { "message" => "%{HTTPD_COMMONLOG}"} + } + geoip { + source => "[source][address]" + target => "[source]" + } + } + output { + elasticsearch { + hosts => [ "${PROD_ES_ES_HOSTS}" ] + user => "${PROD_ES_ES_USER}" + password => "${PROD_ES_ES_PASSWORD}" + cacert => "${PROD_ES_ES_CA_CERTS}" + } + elasticsearch { + hosts => [ "${QA_ES_ES_HOSTS}" ] + user => "${QA_ES_ES_USER}" + password => "${QA_ES_ES_PASSWORD}" + cacert => "${QA_ES_ES_CA_CERTS}" + } + } + - pipeline.id: main + config.string: | + input { + beats { + port => 5044 + } + } + filter { + grok { + match => { "message" => "%{HTTPD_COMMONLOG}"} + } + geoip { + source => "[source][address]" + target => "[source]" + } + } + output { + pipeline { + send_to => 'prod' + } + pipeline { + send_to => 'qa' + } + } + - pipeline.id: production + config.string: | + input { + pipeline { + address => 'prod' + } + } + output { + elasticsearch { + hosts => [ "${PROD_ES_ES_HOSTS}" ] + user => "${PROD_ES_ES_USER}" + password => "${PROD_ES_ES_PASSWORD}" + cacert => "${PROD_ES_ES_CA_CERTS}" + } + - pipeline.id: qa + config.string: | + input { + pipeline { + 
address => 'qa' + } + } + output { + elasticsearch { + hosts => [ "${QA_ES_ES_HOSTS}" ] + user => "${QA_ES_ES_USER}" + password => "${QA_ES_ES_PASSWORD}" + cacert => "${QA_ES_ES_CA_CERTS}" + } + } + services: + - name: beats + service: + spec: + type: ClusterIP + ports: + - port: 5044 + name: "filebeat" + protocol: TCP + targetPort: 5044 +--- +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: elasticsearch-monitoring +spec: + version: 8.6.1 + nodeSets: + - name: default + count: 3 + config: + node.store.allow_mmap: false +--- +apiVersion: kibana.k8s.elastic.co/v1 +kind: Kibana +metadata: + name: kibana-monitoring +spec: + version: 8.6.1 + count: 1 + elasticsearchRef: + name: elasticsearch-monitoring + config: + # enable the UI to reflect container level CPU usage, only displays info if CPU limits are set on the monitored ES cluster + # https://www.elastic.co/guide/en/kibana/current/monitoring-settings-kb.html + monitoring.ui.container.elasticsearch.enabled: true \ No newline at end of file diff --git a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc index 1fab822abc..72ce6ce087 100644 --- a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc @@ -14,7 +14,12 @@ This section describes how to configure and deploy Logstash with ECK. * <<{p}-logstash-quickstart,Quickstart>> * <<{p}-logstash-configuration,Configuration>> +** <<{p}-logstash-configuring-logstash,Configuring Logstash>> +** <<{p}-logstash-pipelines,Configuring Pipelines>> +** <<{p}-logstash-pipelines-es,Using Elasticsearch in Logstash Pipelines>> * <<{p}-logstash-configuration-examples,Configuration examples>> +* <<{p}-logstash-advanced-configuration,Advanced Configuration>> +** <<{p}-logstash-jvm-options,Setting JVM Options>> * <<{p}-logstash-technical-preview-limitations,Technical Preview Limitations>> @@ -24,9 +29,8 @@ NOTE: Running Logstash on ECK is compatible only with Logstash 8.7+. [id="{p}-logstash-quickstart"] == Quickstart +Add the following specification to create a minimal logstash deployment that will listen to a beats agent or elastic agent configured to send to logstash on port 5044, create the service and write the output to an Elasticsearch cluster named `quickstart`, created in the link:k8s-quickstart.html[Elasticsearch quickstart]. -. Apply the following specification to deploy Elastic Agent with the System metrics integration to harvest CPU metrics from the Agent Pods. ECK automatically configures the secured connection to an Elasticsearch cluster named `quickstart`, created in the link:k8s-quickstart.html[Elasticsearch quickstart]. -+ [source,yaml,subs="attributes,+macros,callouts"] ---- cat $$<<$$EOF | kubectl apply -f - @@ -35,12 +39,8 @@ kind: Logstash metadata: name: quickstart spec: - count: 3 + count: 1 version: 8.6.1 - config: - elasticsearchRefs: -# - clusterName: quickstart -# name: quickstart pipelines: - pipeline.id: main config.string: | @@ -50,7 +50,12 @@ spec: } } output { - stdout {} + elasticsearch { + hosts => [ "${QUICKSTART_ES_HOSTS}" ] + user => "${QUICKSTART_ES_USER}" + password => "${QUICKSTART_ES_PASSWORD}" + cacert => "${QUICKSTART_ES_CA_CERTS}" + } } services: - name: beats @@ -67,7 +72,7 @@ EOF + Check <<{p}-logstash-configuration-examples>> for more ready-to-use manifests. -. Monitor the status of Logstash +. 
Check the status of Logstash + [source,sh] ---- @@ -91,8 +96,6 @@ kubectl get pods --selector='logstash.k8s.elastic.co/name=quickstart' ---- NAME READY STATUS RESTARTS AGE quickstart-ls-0 1/1 Running 0 91s -quickstart-ls-1 1/1 Running 0 91s -quickstart-ls-2 1/1 Running 0 91s ---- . Access logs for one of the Pods. @@ -102,6 +105,7 @@ quickstart-ls-2 1/1 Running 0 91s kubectl logs -f quickstart-ls-0 ---- +[id="{p}-logstash-configuration"] == Configuration [id="{p}-logstash-upgrade-specification"] @@ -109,10 +113,10 @@ kubectl logs -f quickstart-ls-0 You can upgrade the Logstash version or change settings by editing the YAML specification. ECK applies the changes by performing a rolling restart of Logstash's Pods. -[id="{p}-logstash-custom-configuration"] -=== Customize the Logstash configuration +[id="{p}-logstash-configuring-logstash"] +=== Logstash configuration -The Logstash configuration (equivalent to logstash.yml) is defined in the `config` element: +The Logstash configuration (equivalent to logstash.yml) is defined in the `spec.config` section: [source,yaml,subs="attributes,+macros,callouts"] ---- @@ -122,12 +126,18 @@ metadata: name: quickstart spec: version: {version} + count: 1 elasticsearchRefs: - name: quickstart + clusterName: quickstart config: + pipeline.workers: 4 <1> + log.level: debug ---- +<1> Customize logstash configuration using logstash.yml settings here -Alternatively, it can be provided through a Secret specified in the `configRef` element. The Secret must have an `logstash.yml` entry with this configuration: + +Alternatively, it can be provided through a Secret specified in the `spec.configRef` section. The Secret must have an `logstash.yml` entry with this configuration: [source,yaml,subs="attributes,+macros"] ---- apiVersion: logstash.k8s.elastic.co/v1alpha1 @@ -136,8 +146,10 @@ metadata: name: quickstart spec: version: {version} + count: 1 elasticsearchRefs: - name: quickstart + clusterName: quickstart configRef: secretName: quickstart-config --- @@ -147,13 +159,15 @@ metadata: name: quickstart-config stringData: logstash.yml: |- + pipeline.workers: 4 + log.level: debug ---- [id="{p}-logstash-pipelines"] -=== Adding Logstash Pipelines +=== Configuring Logstash Pipelines -Logstash pipelines (equivalent to pipelines.yml) are defined in the `pipelines` element: +Logstash pipelines (equivalent to pipelines.yml) are defined in the `spec.pipelines` section: [source,yaml,subs="attributes,+macros,callouts"] ---- @@ -163,8 +177,10 @@ metadata: name: quickstart spec: version: {version} + count: 1 elasticsearchRefs: - - name: quickstart + - clusterName: quickstart + name: quickstart pipelines: - pipeline.id: main config.string: | @@ -183,7 +199,7 @@ spec: } ---- -Alternatively, it can be provided through a Secret specified in the `configRef` element. The Secret must have an `logstash.yml` entry with this configuration: +Alternatively, it can be provided through a Secret specified in the `spec.pipelinesRef` element. The Secret must have an `logstash.yml` entry with this configuration: [source,yaml,subs="attributes,+macros"] ---- apiVersion: logstash.k8s.elastic.co/v1alpha1 @@ -192,8 +208,10 @@ metadata: name: quickstart spec: version: {version} + count: 1 elasticsearchRefs: - - name: quickstart + - clusterName: quickstart + name: quickstart pipelinesRef: secretName: quickstart-pipeline --- @@ -225,7 +243,7 @@ Logstash on ECK supports all options in `pipelines.yml`, including settings to u the size of the batch that the pipeline will process. 
This also includes using `path.config` to point to volumes mounted on the logstash container: -[source,yaml,subs="attributes,+macros"] +[source,yaml,subs="attributes,+macros,callouts"] ---- apiVersion: logstash.k8s.elastic.co/v1alpha1 kind: Logstash @@ -234,27 +252,45 @@ metadata: spec: version: {version} count: 1 + elasticsearchRefs: + - clusterName: quickstart + name: quickstart pipelines: - - pipeline.id: external - pipeline.workers: 4 - path.config: /usr/share/logstash/config/pipelines - podTemplate: - spec: - containers: - - name: logstash - volumeMounts: - - mountPath: /usr/share/logstash/config/pipelines - name: pipelines - readOnly: true - volumes: - - name: pipelines - hostPath: - path: /home/logstash-dev/logstash/pipelines + - pipeline.id: main + config.string: | + input { + beats { + port => 5044 + } + } + output { + elasticsearch { + hosts => [ "${QUICKSTART_ES_HOSTS}" ] + user => "${QUICKSTART_ES_USER}" + password => "${QUICKSTART_ES_PASSWORD}" + cacert => "${QUICKSTART_ES_CA_CERTS}" + } + } ---- NOTE: Persistent Queues and Dead Letter Queues will be supported in a later release, but are not currently suppored. -[source,yaml,subs="attributes,+macros"] + +[id="{p}-logstash-pipelines-es"] +=== Using Elasticsearch in Logstash Pipelines + +The `spec.elasticsearchRefs` section provides a mechanism to help configure Logstash to estabish a secured connection to one or more managed Elasticsearch clusters. By default, it targets all nodes in your cluster. If you want to direct traffic to specific nodes of your Elasticsearch cluster, refer to <<{p}-traffic-splitting>> for more information and examples. + +In order to use `elasticsearchRefs` in a logstash pipeline, the logstash operator will create the necessary resources from the associated elasticsearch, and provide environment variables to allow these resources to be accessed from pipeline configuration, and will be replaced at runtime with the appropriate values. +The environment variables have a fixed naming convention: +`NORMALIZED_CLUSTERNAME_ES_HOSTS` +`NORMALIZED_CLUSTERNAME_ES_USERNAME` +`NORMALIZED_CLUSTERNAME_ES_PASSWORD` +`NORMALIZED_CLUSTERNAME_ES_CA_CERTS` + +where NORMALIZED_CLUSTERNAME is the value taken from the `clusterName` field of the `elasticsearchRef` property, capitalized, and `-` transformed to `_` - eg, prod-es, would becomed PROD_ES. + +[source,yaml,subs="attributes,+macros,callouts"] ---- apiVersion: logstash.k8s.elastic.co/v1alpha1 kind: Logstash @@ -262,19 +298,112 @@ metadata: name: quickstart spec: version: {version} - elasticsearchRefs: - - name: quickstart - outputName: default - - name: agent-monitoring - namespace: elastic-monitoring - outputName: monitoring -... + count: 1 + elasticsearchRefs: <1> + - clusterName: prod-es <2> + name: prod + - clusterName: qa-es <3> + name: qa + namespace: qa + pipelines: + - pipeline.id: main + config.string: | + input { + beats { + port => 5044 + } + } + output { + elasticsearch { <4> + hosts => [ "${PROD_ES_ES_HOSTS}" ] + user => "${PROD_ES_ES_USER}" + password => "${PROD_ES_ES_PASSWORD}" + cacert => "${PROD_ES_ES_CA_CERTS}" + } + elasticsearch { <4> + hosts => [ "${QA_ES_ES_HOSTS}" ] + user => "${QA_ES_ES_USER}" + password => "${QA_ES_ES_PASSWORD}" + cacert => "${QA_ES_ES_CA_CERTS}" + } + } + +---- + +<1> Define Elasticsearch references in the CRD. This will create the appropriate secrets to store certificate details and the rest of the connection information, and create environment variables to allow them to be referred to in Logstash pipeline configurations. 
+<2> This refers to an Elasticsearch cluster residing in the same namespace as the logstash instances +<3> This refers to an Elasticsearch cluster residing in a different namespace to the logstash instance +<4> Elasticsearch output definitions - use the environment variables created by the Logstash operator when specifying an `ElasticsearchRef`. Note the use of "normalized" versions of the `clusterName` in the environment variables used to populate the relevant fields + +[id="{p}-logstash-expose services"] +=== Expose Services + +By default, the Logstash operator creates a headless service for the metrics endpoint to enable metric collection by the metricbeat sidecar for stack monitoring: + ++ +[source,sh] +---- +kubectl get service quickstart-ls-api +---- ++ +[source,sh,subs="attributes"] +---- +NAME AVAILABLE EXPECTED AGE VERSION +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +quickstart-ls-api ClusterIP None 9600/TCP 48s +---- + +Additional services can be added by using the `spec.services` section of the resource: + +[source,yaml,subs="attributes,+macros,callouts"] +---- +services: + - name: beats + service: + spec: + ports: + - port: 5044 + name: "winlogbeat" + protocol: TCP + - port: 5045 + name: "filebeat" + protocol: TCP ---- -[id="{p}-logstash-connect-es"] -=== Customize the connection to an Elasticsearch cluster +[id="{p}-logstash-pod-configuration"] +=== Pod configuration +You can <<{p}-customize-pods,customize the Logstash Pod>> using a Pod template. -The `elasticsearchRefs` element allows ECK to automatically configure Elastic Agent to establish a secured connection to one or more managed Elasticsearch clusters. By default, it targets all nodes in your cluster. If you want to direct traffic to specific nodes of your Elasticsearch cluster, refer to <<{p}-traffic-splitting>> for more information and examples. +The following example demonstrates how to create a Logstash deployment with custom node affinity, increased heap size, and resource limits. + +[source,yaml,subs="attributes"] +---- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: logstash-sample +spec: + version: {version} + count: 1 + elasticsearchRef: + name: "elasticsearch-sample" + podTemplate: + spec: + containers: + - name: logtash + env: + - name: LS_JAVA_OPTS + value: "-Xmx2g -Xms2g" + resources: + requests: + memory: 1Gi + cpu: 0.5 + limits: + memory: 4Gi + cpu: 2 +---- + +The name of the container in the Pod template must be `logstash`. [id="{p}-logstash-configuration-examples"] @@ -331,30 +460,86 @@ kubectl apply -f {logstash_recipes}/logstash-monitored.yaml Deploys an Elasticsearch and Kibana monitoring cluster, and a Logstash that will send it's monitoring information to this cluster. You can view the stack monitoring information in the monitoring cluster's Kibana -[id="{p}-logstash-on-eck-limitations"] -== Logstash on ECK Limitations +=== Multiple pipelines/multiple es clusters + +[source,sh,subs="attributes"] +---- +kubectl apply -f {logstash_recipes}/logstash-multi.yaml +---- + +Deploys Elasticsearch and Kibana in prod and qa configurations, running in separate namespaces. Logstash is configured with a multiple pipeline->pipeline configuration, with a source pipeline routing to prod and qa pipelines. + +[id="{p}-logstash-advanced-configuration"] +== Advanced Configuration + +[id="{p}-logstash-jvm-options"] +=== Setting JVM Options + + +Changes to JVM settings should be changed by using the `LS_JAVA_OPTS` environment variable to override default settings in `jvm.options`. 
This approach ensures that the expected settings from `jvm.options` are preserved, and that only the options that explicitly need to be overridden are changed. + +To do this, set the `LS_JAVA_OPTS` environment variable in the container definition of your Logstash resource: + +[source,yaml,subs="attributes,+macros,callouts"] +---- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: quickstart +spec: + podTemplate: + spec: + containers: + - name: logstash + env: + - name: LS_JAVA_OPTS <1> + value: "-Xmx2g -Xms2g" +---- +<1> This changes the minimum and maximum heap size of the JVM on each Pod to 2GB. + +[id="{p}-logstash-scaling-logstash"] +== Scaling Logstash +
+* The ability to scale Logstash is highly dependent on the pipeline configurations, and the plugins used in those pipelines. Not all Logstash deployments can be scaled horizontally by increasing the number of Logstash Pods defined in the Logstash resource - depending on the plugins being used, this could result in data loss, duplication of data, or Pods running idle and unable to be utilized. +* Particular care should be taken with plugins that: +** Retrieve data from external sources. +*** Plugins that retrieve data from external sources, and require some level of coordination between nodes to split up work, are not good candidates for scaling horizontally, and would likely produce some data duplication. These are plugins such as the JDBC input plugin, which has no automatic way to split queries across Logstash instances, or the S3 input, which has no way to split which buckets to read across Logstash instances. +*** Plugins that retrieve data from external sources, where work is distributed externally to Logstash, but may impose their own limits. These are plugins like the Kafka input, or Azure Event Hubs, where parallelism is limited by the number of partitions relative to the number of consumers. In cases like this, extra Logstash Pods may be idle if the number of consumer threads multiplied by the number of Pods is greater than the number of partitions. +** Require events to be received in order. +*** Certain plugins, such as the aggregate filter, expect events to be received in strict order to run without error or data loss. Any plugin that requires the number of pipeline workers to be `1` will also have issues when horizontal scaling is used. +* If the pipeline does not contain any such plugin, the number of Logstash instances can be increased by setting the `count` property in the Logstash resource: +
+[source,yaml,subs="attributes,+macros,callouts"] +---- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: quickstart +spec: + version: {version} + count: 3 +---- + - -* When running Logstash on ECK, it is important to understand how data is sent into Logstash when determining replica counts of pods. Pipelines that include plugins that need to store state, or cannot automatically distribute work across pods should be treated with care as data loss and/or duplication can result from incorrectly configured setups -* To that end, the technical preview should only be run with a limited set of input plugins: [id="{p}-logstash-technical-preview-limitations"] == Technical Preview Limitations Note that this release is a technical preview, is still under active development and has limited functionality.
-Limitations include, but are not limited to: - -* Limited support for plugins - -** Input plugins: logstash-input-azure_event_hubs, logstash-input-beats, logstash-input-elastic_agent, logstash-input-kafka, logstash-input-tcp, logstash-input-http -** While most filter plugins are supported, the following plugins are not currently supported: -*** logstash-filter-jdbc_static, logstash-filter-jdbc_streaming, logstash-filter-aggregate -*** Other filters may require additional manual work to mount volumes -** While most output plugins are supported, the following plugins are not currently supported, or may require manual work to be operational: -*** logstash-output-s3 - requires a volume mount to store in progress work to avoid data loss -*** logstash-output-jms - requires jar files to be placed on the logstash classpath +Note that this release is a technical preview, is still under active development and has additional limitations: * No support for persistence -* `ElasticsearchRef` implementation in plugins in preview mode +** The operator provides no additional support for persistence, including PQ, DLQ support and plugins that may require persistent storage to keep track of state. +* `ElasticsearchRef` implementation in plugins is still in preview mode ** In preview mode, plugins will need to be populated with environment variables populated by the Logstash operator. -* No `ElasticsearchRef` support for Pipeline Central Management -** Manual configuration required in `Config`/`ConfigRef` + + +* Limited support for plugins. Note that this is not an exhaustive list, and plugins outside of the logstash plugin matrix have not been considered for this list: +** Input plugins: +*** The following plugins are supported: logstash-input-azure_event_hubs, logstash-input-beats, logstash-input-elastic_agent, logstash-input-kafka, logstash-input-tcp, logstash-input-http, logstash-input-udp +** Filter Plugins: +*** The following plugins are *not* supported: logstash-filter-jdbc_static, logstash-filter-jdbc_streaming, logstash-filter-aggregate +*** Note: Other filters may require additional manual work to mount volumes for certain configurations +** Output Plugins: +*** The following plugins require manual work to be operational: +*** logstash-output-s3 - requires a volume mount to store in progress work to avoid data loss. 
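The quickstart earlier in this patch only creates the Logstash half of the Beats connection. As a rough companion sketch (not taken verbatim from the recipes in this series), a minimal Filebeat resource pointing at the quickstart's `beats` service could look like the following; the host name `quickstart-ls-beats.default.svc:5044` and the input path are assumptions based on the naming used in the recipes and should be adjusted to the Service name that ECK actually creates.

[source,yaml]
----
apiVersion: beat.k8s.elastic.co/v1beta1
kind: Beat
metadata:
  name: quickstart-filebeat
spec:
  type: filebeat
  version: 8.7.0
  config:
    filebeat.inputs:
      - type: log
        paths:
          - /data/*.log               # assumed sample data location
    output.logstash:
      # Assumed host for the `beats` service defined in the quickstart;
      # verify the actual Service name with `kubectl get services`.
      hosts: ["quickstart-ls-beats.default.svc:5044"]
  deployment:
    podTemplate:
      spec:
        containers:
          - name: filebeat
            volumeMounts:
              - name: data
                mountPath: /data
              - name: beat-data
                mountPath: /usr/share/filebeat/data
        volumes:
          - name: data
            emptyDir: {}
          - name: beat-data
            emptyDir: {}
----

Any Beat or Elastic Agent output that can reach the `beats` service on port 5044 would be wired up the same way.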
+ + From d14faf08b561fc43f58e4d0a6f59aacc4664d477 Mon Sep 17 00:00:00 2001 From: Rob Bavey Date: Tue, 28 Mar 2023 16:39:00 -0400 Subject: [PATCH 13/26] Improvements --- config/recipes/logstash/README.asciidoc | 2 +- .../logstash.asciidoc | 22 +++++++++---------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/config/recipes/logstash/README.asciidoc b/config/recipes/logstash/README.asciidoc index eda4895ef6..9ec279a1ba 100644 --- a/config/recipes/logstash/README.asciidoc +++ b/config/recipes/logstash/README.asciidoc @@ -8,7 +8,7 @@ endif::[] = Using Logstash with ECK -These recipes demonstrate how to run the Logstash, Elasticsearch, Kibana and Filebeat deployed via ECK, using the link:https://www.elastic.co/guide/en/logstash/current/advanced-pipeline.html[Logstash log parsing example] as a starting point +These recipes demonstrate how to run Logstash, Elasticsearch, Kibana and Filebeat deployed via ECK, using the link:https://www.elastic.co/guide/en/logstash/current/advanced-pipeline.html[Logstash log parsing example] as a starting point ===== Inline Pipeline usage - `logstash-eck.yaml` diff --git a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc index 72ce6ce087..d5bf737343 100644 --- a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc @@ -17,6 +17,7 @@ This section describes how to configure and deploy Logstash with ECK. ** <<{p}-logstash-configuring-logstash,Configuring Logstash>> ** <<{p}-logstash-pipelines,Configuring Pipelines>> ** <<{p}-logstash-pipelines-es,Using Elasticsearch in Logstash Pipelines>> +** <<{p}-logstash-expose-services,Exposing Services>> * <<{p}-logstash-configuration-examples,Configuration examples>> * <<{p}-logstash-advanced-configuration,Advanced Configuration>> ** <<{p}-logstash-jvm-options,Setting JVM Options>> @@ -98,7 +99,7 @@ NAME READY STATUS RESTARTS AGE quickstart-ls-0 1/1 Running 0 91s ---- -. Access logs for one of the Pods. +. Access logs for a pod. + [source,sh] ---- @@ -111,7 +112,7 @@ kubectl logs -f quickstart-ls-0 [id="{p}-logstash-upgrade-specification"] === Upgrade the Logstash specification -You can upgrade the Logstash version or change settings by editing the YAML specification. ECK applies the changes by performing a rolling restart of Logstash's Pods. +You can upgrade the Logstash version or change settings by editing the YAML specification. ECK applies the changes by performing a rolling restart of Logstash Pods. [id="{p}-logstash-configuring-logstash"] === Logstash configuration @@ -199,7 +200,7 @@ spec: } ---- -Alternatively, it can be provided through a Secret specified in the `spec.pipelinesRef` element. The Secret must have an `logstash.yml` entry with this configuration: +Alternatively, it can be provided through a Secret specified in the `spec.pipelinesRef` element. The Secret must have a `logstash.yml` entry with this configuration: [source,yaml,subs="attributes,+macros"] ---- apiVersion: logstash.k8s.elastic.co/v1alpha1 @@ -239,7 +240,7 @@ stringData: ---- -Logstash on ECK supports all options in `pipelines.yml`, including settings to update the number of workers, and +Logstash on ECK will* support all options present in `pipelines.yml`, including settings to update the number of workers, and the size of the batch that the pipeline will process. 
This also includes using `path.config` to point to volumes mounted on the logstash container: @@ -273,7 +274,7 @@ spec: } ---- -NOTE: Persistent Queues and Dead Letter Queues will be supported in a later release, but are not currently suppored. +NOTE: Persistent Queues and Dead Letter Queues will be supported in a later release, but are not currently supported. [id="{p}-logstash-pipelines-es"] @@ -335,7 +336,7 @@ spec: <3> This refers to an Elasticsearch cluster residing in a different namespace to the logstash instance <4> Elasticsearch output definitions - use the environment variables created by the Logstash operator when specifying an `ElasticsearchRef`. Note the use of "normalized" versions of the `clusterName` in the environment variables used to populate the relevant fields -[id="{p}-logstash-expose services"] +[id="{p}-logstash-expose-services"] === Expose Services By default, the Logstash operator creates a headless service for the metrics endpoint to enable metric collection by the metricbeat sidecar for stack monitoring: @@ -348,7 +349,6 @@ kubectl get service quickstart-ls-api + [source,sh,subs="attributes"] ---- -NAME AVAILABLE EXPECTED AGE VERSION NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE quickstart-ls-api ClusterIP None 9600/TCP 48s ---- @@ -372,9 +372,9 @@ services: [id="{p}-logstash-pod-configuration"] === Pod configuration -You can <<{p}-customize-pods,customize the Logstash Pod>> using a Pod template. +You can <<{p}-customize-pods,customize the Logstash Pod>> using a Pod template, defined in the `spec.podTemplate` section of the configuration. -The following example demonstrates how to create a Logstash deployment with custom node affinity, increased heap size, and resource limits. +The following example demonstrates how to create a Logstash deployment with increased heap size, and resource limits. [source,yaml,subs="attributes"] ---- @@ -411,7 +411,7 @@ The name of the container in the Pod template must be `logstash`. This section contains manifests that illustrate common use cases, and can be your starting point in exploring Logstash deployed with ECK. These manifests are self-contained and work out-of-the-box on any non-secured Kubernetes cluster. They all contain a three-node Elasticsearch cluster and a single Kibana instance. -CAUTION: The examples in this section are for illustration purposes only and should not be considered to be production-ready. Some of these examples use the `node.store.allow_mmap: false` setting which has performance implications and should be tuned for production workloads, as described in <<{p}-virtual-memory>>. +CAUTION: The examples in this section are for illustration purposes only and should not be considered to be production-ready. Some of these examples use the `node.store.allow_mmap: false` setting on Elasticsearch which has performance implications and should be tuned for production workloads, as described in <<{p}-virtual-memory>>. === Single Pipeline defined in CRD @@ -540,6 +540,6 @@ Note that this release is a technical preview, is still under active development *** Note: Other filters may require additional manual work to mount volumes for certain configurations ** Output Plugins: *** The following plugins require manual work to be operational: -*** logstash-output-s3 - requires a volume mount to store in progress work to avoid data loss. +**** logstash-output-s3 - requires a volume mount to store in progress work to avoid data loss. 
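The `logstash-output-s3` caveat above can be addressed with a Pod template volume. The following is a minimal sketch rather than one of the recipes in this series: it assumes an `emptyDir` (a PersistentVolumeClaim would be more robust) mounted at `/usr/share/logstash/s3-tmp`, points the plugin's `temporary_directory` option at that mount, and leaves the bucket, region, and AWS credential configuration as placeholders.

[source,yaml]
----
apiVersion: logstash.k8s.elastic.co/v1alpha1
kind: Logstash
metadata:
  name: logstash-s3
spec:
  version: 8.7.0
  count: 1
  pipelines:
    - pipeline.id: main
      config.string: |
        input { beats { port => 5044 } }
        output {
          s3 {
            bucket => "my-bucket"            # placeholder bucket
            region => "us-east-1"            # placeholder region
            temporary_directory => "/usr/share/logstash/s3-tmp"
          }
        }
  podTemplate:
    spec:
      containers:
        - name: logstash
          volumeMounts:
            - name: s3-tmp
              mountPath: /usr/share/logstash/s3-tmp
      volumes:
        - name: s3-tmp
          emptyDir: {}   # a PersistentVolumeClaim survives Pod rescheduling; emptyDir does not
----

Whether an `emptyDir` is acceptable depends on how much in-flight data you are willing to lose when a Pod is deleted.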
From f9ba1ffb68ff24e8f084165810f722406242d51d Mon Sep 17 00:00:00 2001 From: Kaise Cheng Date: Thu, 20 Apr 2023 19:07:19 +0100 Subject: [PATCH 14/26] add recipes for ElasticsearchRefs --- config/recipes/logstash/README.asciidoc | 4 ++ config/recipes/logstash/logstash-es-role.yaml | 54 +++++++++++++++++++ 2 files changed, 58 insertions(+) create mode 100644 config/recipes/logstash/logstash-es-role.yaml diff --git a/config/recipes/logstash/README.asciidoc b/config/recipes/logstash/README.asciidoc index 9ec279a1ba..2d11f752c2 100644 --- a/config/recipes/logstash/README.asciidoc +++ b/config/recipes/logstash/README.asciidoc @@ -26,5 +26,9 @@ Deploys Logstash with the pipeline details defined in the CRD, and the pipeline Deploys Logstash and a dedicated Elasticsearch and Kibana monitoring cluster, and sends Logstash monitoring data to that cluster. +===== Logstash and Elasticsearch with custom role - `logstash-es-role.yaml` + +Deploys Logstash and Elasticsearch and a Secret to customize Elasticsearch role `eck_logstash_user_role`. The role is essential for Logstash to have privileges to write document to custom index "my-index". CAUTION: These recipes use the `node.store.allow_mmap: false` configuration value to avoid configuring memory mapping settings on the underlying host. This could have a significant performance impact on your Elasticsearch cluster and should not be used in production without careful consideration. See https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-virtual-memory.html for more information. + diff --git a/config/recipes/logstash/logstash-es-role.yaml b/config/recipes/logstash/logstash-es-role.yaml new file mode 100644 index 0000000000..cc5c910f28 --- /dev/null +++ b/config/recipes/logstash/logstash-es-role.yaml @@ -0,0 +1,54 @@ +kind: Secret +apiVersion: v1 +metadata: + name: my-roles-secret +stringData: + roles.yml: |- + eck_logstash_user_role: + cluster: [ "monitor", "manage_ilm", "read_ilm", "manage_logstash_pipelines", "manage_index_templates", "cluster:admin/ingest/pipeline/get"] + indices: + - names: [ "my-index", "logstash", "logstash-*", "ecs-logstash", "ecs-logstash-*", "logs-*", "metrics-*", "synthetics-*", "traces-*" ] + privileges: [ "manage", "write", "create_index", "read", "view_index_metadata" ] +--- +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: elasticsearch-sample +spec: + version: 8.7.0 + auth: + roles: + - secretName: my-roles-secret + nodeSets: + - name: default + count: 2 + config: + node.store.allow_mmap: false +--- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: logstash-sample +spec: + count: 1 + version: 8.7.0 + elasticsearchRefs: + - name: elasticsearch-sample + pipelines: + - pipeline.id: main + config.string: | + input { exec { command => "uptime" interval => 10 } } + output { + elasticsearch { + hosts => [ "${DEFAULT_ELASTICSEARCH_SAMPLE_ES_HOSTS}" ] + ssl => true + cacert => "${DEFAULT_ELASTICSEARCH_SAMPLE_ES_SSL_CERTIFICATE_AUTHORITY}" + user => "${DEFAULT_ELASTICSEARCH_SAMPLE_ES_USERNAME}" + password => "${DEFAULT_ELASTICSEARCH_SAMPLE_ES_PASSWORD}" + index => "my-index" + data_stream => false + ilm_enabled => false + manage_template => false + } + } +--- \ No newline at end of file From 38de4ddd4088797b01cdf19efc3fe8fef6be3aa9 Mon Sep 17 00:00:00 2001 From: Rob Bavey Date: Mon, 3 Apr 2023 17:39:38 -0400 Subject: [PATCH 15/26] Responded to code review comments --- config/recipes/logstash/logstash-eck.yaml | 20 +++-- .../recipes/logstash/logstash-monitored.yaml 
| 12 +-- config/recipes/logstash/logstash-multi.yaml | 76 ++----------------- .../logstash/logstash-pipelinesecret.yaml | 8 +- .../logstash/logstash-pipelinevolume.yaml | 8 +- .../advanced-topics/stack-monitoring.asciidoc | 2 + 6 files changed, 36 insertions(+), 90 deletions(-) diff --git a/config/recipes/logstash/logstash-eck.yaml b/config/recipes/logstash/logstash-eck.yaml index 407cae1fde..b3c9a32c9b 100644 --- a/config/recipes/logstash/logstash-eck.yaml +++ b/config/recipes/logstash/logstash-eck.yaml @@ -7,7 +7,7 @@ metadata: app.kubernetes.io/name: eck-logstash app.kubernetes.io/component: elasticsearch spec: - version: 8.6.1 + version: 8.7.0 nodeSets: - name: default count: 3 @@ -23,7 +23,7 @@ metadata: app.kubernetes.io/name: eck-logstash app.kubernetes.io/component: kibana spec: - version: 8.6.1 + version: 8.7.0 count: 1 elasticsearchRef: name: elasticsearch @@ -37,7 +37,7 @@ metadata: app.kubernetes.io/component: filebeat spec: type: filebeat - version: 8.6.1 + version: 8.7.0 config: filebeat.inputs: - type: log @@ -83,10 +83,10 @@ metadata: app.kubernetes.io/component: logstash spec: count: 1 - version: 8.6.1 -# elasticsearchRefs: -# - clusterName: eck -# name: elasticsearch + version: 8.7.0 + elasticsearchRefs: + - clusterName: default + name: elasticsearch pipelines: - pipeline.id: main config.string: | @@ -105,7 +105,11 @@ spec: } } output { - stdout { codec => rubydebug } + elasticsearch { + hosts => [ "${DEFAULT_ELASTICSEARCH_HOSTS}" ] + user => "${DEFAULT_ELASTICSEARCH_USER}" + password => "${DEFAULT_ELASTICSEARCH_PASSWORD}" + cacert => "${DEFAULT_ELASTICSEARCH_CA_CERTS}" } services: - name: beats diff --git a/config/recipes/logstash/logstash-monitored.yaml b/config/recipes/logstash/logstash-monitored.yaml index 6edc637409..79c2ed1977 100644 --- a/config/recipes/logstash/logstash-monitored.yaml +++ b/config/recipes/logstash/logstash-monitored.yaml @@ -7,7 +7,7 @@ metadata: app.kubernetes.io/name: eck-logstash app.kubernetes.io/component: elasticsearch spec: - version: 8.6.1 + version: 8.7.0 nodeSets: - name: default count: 3 @@ -23,7 +23,7 @@ metadata: app.kubernetes.io/name: eck-logstash app.kubernetes.io/component: kibana spec: - version: 8.6.1 + version: 8.7.0 count: 1 elasticsearchRef: name: elasticsearch @@ -37,7 +37,7 @@ metadata: app.kubernetes.io/component: filebeat spec: type: filebeat - version: 8.6.1 + version: 8.7.0 config: filebeat.inputs: - type: log @@ -83,7 +83,7 @@ metadata: app.kubernetes.io/component: logstash spec: count: 1 - version: 8.6.1 + version: 8.7.0 # elasticsearchRefs: # - clusterName: eck # name: elasticsearch @@ -127,7 +127,7 @@ kind: Elasticsearch metadata: name: elasticsearch-monitoring spec: - version: 8.6.1 + version: 8.7.0 nodeSets: - name: default count: 3 @@ -139,7 +139,7 @@ kind: Kibana metadata: name: kibana-monitoring spec: - version: 8.6.1 + version: 8.7.0 count: 1 elasticsearchRef: name: elasticsearch-monitoring diff --git a/config/recipes/logstash/logstash-multi.yaml b/config/recipes/logstash/logstash-multi.yaml index ddef053b1b..26feaddce3 100644 --- a/config/recipes/logstash/logstash-multi.yaml +++ b/config/recipes/logstash/logstash-multi.yaml @@ -15,7 +15,7 @@ metadata: app.kubernetes.io/name: eck-logstash app.kubernetes.io/component: elasticsearch spec: - version: 8.6.1 + version: 8.7.0 nodeSets: - name: default count: 3 @@ -32,7 +32,7 @@ metadata: app.kubernetes.io/name: eck-logstash app.kubernetes.io/component: kibana spec: - version: 8.6.1 + version: 8.7.0 count: 1 elasticsearchRef: name: qa @@ -45,7 +45,7 @@ metadata: 
app.kubernetes.io/name: eck-logstash app.kubernetes.io/component: elasticsearch spec: - version: 8.6.1 + version: 8.7.0 nodeSets: - name: default count: 3 @@ -61,7 +61,7 @@ metadata: app.kubernetes.io/name: eck-logstash app.kubernetes.io/component: kibana spec: - version: 8.6.1 + version: 8.7.0 count: 1 elasticsearchRef: name: production @@ -75,7 +75,7 @@ metadata: app.kubernetes.io/component: filebeat spec: type: filebeat - version: 8.6.1 + version: 8.7.0 config: filebeat.inputs: - type: log @@ -121,7 +121,7 @@ metadata: app.kubernetes.io/component: logstash spec: count: 1 - version: 8.6.1 + version: 8.7.0 elasticsearchRefs: - clusterName: prod-es name: production @@ -133,66 +133,6 @@ spec: elasticsearchRefs: - name: elasticsearch-monitoring pipelines: - - pipeline.id: main - config.string: | - input { - beats { - port => 5044 - } - } - filter { - grok { - match => { "message" => "%{HTTPD_COMMONLOG}"} - } - geoip { - source => "[source][address]" - target => "[source]" - } - } - output { - elasticsearch { - hosts => [ "${PROD_ES_ES_HOSTS}" ] - user => "${PROD_ES_ES_USER}" - password => "${PROD_ES_ES_PASSWORD}" - cacert => "${PROD_ES_ES_CA_CERTS}" - } - elasticsearch { - hosts => [ "${QA_ES_ES_HOSTS}" ] - user => "${QA_ES_ES_USER}" - password => "${QA_ES_ES_PASSWORD}" - cacert => "${QA_ES_ES_CA_CERTS}" - } - } - - pipeline.id: main - config.string: | - input { - beats { - port => 5044 - } - } - filter { - grok { - match => { "message" => "%{HTTPD_COMMONLOG}"} - } - geoip { - source => "[source][address]" - target => "[source]" - } - } - output { - elasticsearch { - hosts => [ "${PROD_ES_ES_HOSTS}" ] - user => "${PROD_ES_ES_USER}" - password => "${PROD_ES_ES_PASSWORD}" - cacert => "${PROD_ES_ES_CA_CERTS}" - } - elasticsearch { - hosts => [ "${QA_ES_ES_HOSTS}" ] - user => "${QA_ES_ES_USER}" - password => "${QA_ES_ES_PASSWORD}" - cacert => "${QA_ES_ES_CA_CERTS}" - } - } - pipeline.id: main config.string: | input { @@ -262,7 +202,7 @@ kind: Elasticsearch metadata: name: elasticsearch-monitoring spec: - version: 8.6.1 + version: 8.7.0 nodeSets: - name: default count: 3 @@ -274,7 +214,7 @@ kind: Kibana metadata: name: kibana-monitoring spec: - version: 8.6.1 + version: 8.7.0 count: 1 elasticsearchRef: name: elasticsearch-monitoring diff --git a/config/recipes/logstash/logstash-pipelinesecret.yaml b/config/recipes/logstash/logstash-pipelinesecret.yaml index 697d879b72..a6e6e65c03 100644 --- a/config/recipes/logstash/logstash-pipelinesecret.yaml +++ b/config/recipes/logstash/logstash-pipelinesecret.yaml @@ -7,7 +7,7 @@ metadata: app.kubernetes.io/name: eck-logstash app.kubernetes.io/component: elasticsearch spec: - version: 8.6.1 + version: 8.7.0 nodeSets: - name: default count: 3 @@ -23,7 +23,7 @@ metadata: app.kubernetes.io/name: eck-logstash app.kubernetes.io/component: kibana spec: - version: 8.6.1 + version: 8.7.0 count: 1 elasticsearchRef: name: elasticsearch @@ -37,7 +37,7 @@ metadata: app.kubernetes.io/component: filebeat spec: type: filebeat - version: 8.6.1 + version: 8.7.0 config: filebeat.inputs: - type: log @@ -83,7 +83,7 @@ metadata: app.kubernetes.io/component: logstash spec: count: 1 - version: 8.6.1 + version: 8.7.0 # elasticsearchRefs: # - clusterName: eck # name: elasticsearch diff --git a/config/recipes/logstash/logstash-pipelinevolume.yaml b/config/recipes/logstash/logstash-pipelinevolume.yaml index eab23dc81d..7f162eb635 100644 --- a/config/recipes/logstash/logstash-pipelinevolume.yaml +++ b/config/recipes/logstash/logstash-pipelinevolume.yaml @@ -7,7 +7,7 @@ metadata: 
app.kubernetes.io/name: eck-logstash app.kubernetes.io/component: elasticsearch spec: - version: 8.6.1 + version: 8.7.0 nodeSets: - name: default count: 3 @@ -23,7 +23,7 @@ metadata: app.kubernetes.io/name: eck-logstash app.kubernetes.io/component: kibana spec: - version: 8.6.1 + version: 8.7.0 count: 1 elasticsearchRef: name: elasticsearch @@ -37,7 +37,7 @@ metadata: app.kubernetes.io/component: filebeat spec: type: filebeat - version: 8.6.1 + version: 8.7.0 config: filebeat.inputs: - type: log @@ -83,7 +83,7 @@ metadata: app.kubernetes.io/component: logstash spec: count: 1 - version: 8.6.1 + version: 8.7.0 # elasticsearchRefs: # - clusterName: eck # name: elasticsearch diff --git a/docs/advanced-topics/stack-monitoring.asciidoc b/docs/advanced-topics/stack-monitoring.asciidoc index b541da6e10..a43c22a8f2 100644 --- a/docs/advanced-topics/stack-monitoring.asciidoc +++ b/docs/advanced-topics/stack-monitoring.asciidoc @@ -103,6 +103,8 @@ NOTE: If Stack Monitoring is configured for a Beat, but the corresponding Elasti NOTE: If Logs Stack Monitoring is configured for a Beat, and custom container arguments (`podTemplate.spec.containers[].args`) include `-e`, which enables logging to stderr and disables log file output, this argument will be removed from the Pod to allow the Filebeat sidecar to consume the Beat's log files. +NOTE: Stack Monitoring for Logstash on ECK is only available for Logstash versions 8.7.0 and above. + IMPORTANT: The monitoring cluster must be managed by ECK in the same Kubernetes cluster as the monitored one. You can send metrics and logs to two different Elasticsearch monitoring clusters. From a29d9f0660691a0f3dac639fc9d5bbf26608879e Mon Sep 17 00:00:00 2001 From: Rob Bavey Date: Mon, 24 Apr 2023 15:16:21 -0400 Subject: [PATCH 16/26] Updates for elasticsearchRef. 
A little tidying --- config/recipes/logstash/logstash-es-role.yaml | 9 +++++---- .../logstash.asciidoc | 20 ++++++++++++------- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/config/recipes/logstash/logstash-es-role.yaml b/config/recipes/logstash/logstash-es-role.yaml index cc5c910f28..06d1a2c64d 100644 --- a/config/recipes/logstash/logstash-es-role.yaml +++ b/config/recipes/logstash/logstash-es-role.yaml @@ -34,17 +34,18 @@ spec: version: 8.7.0 elasticsearchRefs: - name: elasticsearch-sample + clusterName: sample pipelines: - pipeline.id: main config.string: | input { exec { command => "uptime" interval => 10 } } output { elasticsearch { - hosts => [ "${DEFAULT_ELASTICSEARCH_SAMPLE_ES_HOSTS}" ] + hosts => [ "${SAMPLE_ES_HOSTS}" ] ssl => true - cacert => "${DEFAULT_ELASTICSEARCH_SAMPLE_ES_SSL_CERTIFICATE_AUTHORITY}" - user => "${DEFAULT_ELASTICSEARCH_SAMPLE_ES_USERNAME}" - password => "${DEFAULT_ELASTICSEARCH_SAMPLE_ES_PASSWORD}" + cacert => "${SAMPLE_ES_SSL_CERTIFICATE_AUTHORITY}" + user => "${SAMPLE_ES_USERNAME}" + password => "${SAMPLE_ES_PASSWORD}" index => "my-index" data_stream => false ilm_enabled => false diff --git a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc index d5bf737343..c3c2283756 100644 --- a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc @@ -41,7 +41,10 @@ metadata: name: quickstart spec: count: 1 - version: 8.6.1 + elasticsearchRefs: + - name: quickstart + clusterName: quickstart + version: {version} pipelines: - pipeline.id: main config.string: | @@ -291,6 +294,8 @@ The environment variables have a fixed naming convention: where NORMALIZED_CLUSTERNAME is the value taken from the `clusterName` field of the `elasticsearchRef` property, capitalized, and `-` transformed to `_` - eg, prod-es, would becomed PROD_ES. +NOTE: The `clusterName` value should be unique across namespaces. + [source,yaml,subs="attributes,+macros,callouts"] ---- apiVersion: logstash.k8s.elastic.co/v1alpha1 @@ -385,8 +390,9 @@ metadata: spec: version: {version} count: 1 - elasticsearchRef: - name: "elasticsearch-sample" + elasticsearchRefs: + - name: "elasticsearch-sample" + clusterName: "sample" podTemplate: spec: containers: @@ -526,10 +532,10 @@ spec: Note that this release is a technical preview, is still under active development and has additional limitations: -* No support for persistence -** The operator provides no additional support for persistence, including PQ, DLQ support and plugins that may require persistent storage to keep track of state. -* `ElasticsearchRef` implementation in plugins is still in preview mode -** In preview mode, plugins will need to be populated with environment variables populated by the Logstash operator. +* No integrated support for persistence +** The operator provides no integrated support for persistence, including PQ, DLQ support and plugins that may require persistent storage to keep track of state - any persistence should be added manually. +* `ElasticsearchRef` implementation in plugins is in preview mode +** Adding elasticsearch to plugin definitions requires the use of environment variables populated by the Logstash operator, which may change in future versions of the logstash operator. * Limited support for plugins. 
Note that this is not an exhaustive list, and plugins outside of the logstash plugin matrix have not been considered for this list: From f07d6cefa913685d38ad148aa82cc31d0d390f56 Mon Sep 17 00:00:00 2001 From: Rob Bavey Date: Mon, 24 Apr 2023 16:42:08 -0400 Subject: [PATCH 17/26] Add role information to elasticsearch integration --- .../logstash.asciidoc | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc index c3c2283756..081054d59b 100644 --- a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc @@ -296,6 +296,18 @@ where NORMALIZED_CLUSTERNAME is the value taken from the `clusterName` field of NOTE: The `clusterName` value should be unique across namespaces. +NOTE: The logstash ECK operator will create a user called `eck_logstash_user_role` when an `elasticsearchRef` is specified. This user has the following permissions: +``` + "cluster": ["monitor", "manage_ilm", "read_ilm", "manage_logstash_pipelines", "manage_index_templates", "cluster:admin/ingest/pipeline/get",], + "indices": [ + { + "names": [ "logstash", "logstash-*", "ecs-logstash", "ecs-logstash-*", "logs-*", "metrics-*", "synthetics-*", "traces-*" ], + "privileges": ["manage", "write", "create_index", "read", "view_index_metadata"] + } + +``` +The permissions for this user <<{p}-users-and-roles,can be updated>> to include more indices if the elasticsearch plugin is expected to use indices other than the default. See <<{p}-logstash-configuration-examples, logstash configuration examples>> for a sample configuration that creates a user to write to a custom index. + [source,yaml,subs="attributes,+macros,callouts"] ---- apiVersion: logstash.k8s.elastic.co/v1alpha1 @@ -341,6 +353,8 @@ spec: <3> This refers to an Elasticsearch cluster residing in a different namespace to the logstash instance <4> Elasticsearch output definitions - use the environment variables created by the Logstash operator when specifying an `ElasticsearchRef`. Note the use of "normalized" versions of the `clusterName` in the environment variables used to populate the relevant fields + + [id="{p}-logstash-expose-services"] === Expose Services @@ -457,6 +471,16 @@ kubectl apply -f {logstash_recipes}/logstash-pipelinevolume.yaml Deploys Logstash with a single pipeline defined in a secret, mounted as a volume, and referenced by `path.config` +=== Writing to a custom elasticsearch index + +[source,sh,subs="attributes"] +---- +kubectl apply -f {logstash_recipes}/logstash-es-role.yaml +---- + +Deploys Logstash and Elasticsearch, and creates an updated version of the `eck_logstash_user_role` to write to a user specified index. 
+ + === Elasticsearch and Kibana Stack Monitoring [source,sh,subs="attributes"] From 2f45d1925ece0d9ca3d1c53d0bfa315c4c083a34 Mon Sep 17 00:00:00 2001 From: Rob Bavey Date: Mon, 24 Apr 2023 18:16:22 -0400 Subject: [PATCH 18/26] Fix env variable names in example snippets --- .../logstash.asciidoc | 56 +++++++++---------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc index 081054d59b..db4516f0a7 100644 --- a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc @@ -43,7 +43,7 @@ spec: count: 1 elasticsearchRefs: - name: quickstart - clusterName: quickstart + clusterName: qs version: {version} pipelines: - pipeline.id: main @@ -55,17 +55,17 @@ spec: } output { elasticsearch { - hosts => [ "${QUICKSTART_ES_HOSTS}" ] - user => "${QUICKSTART_ES_USER}" - password => "${QUICKSTART_ES_PASSWORD}" - cacert => "${QUICKSTART_ES_CA_CERTS}" + hosts => [ "${QS_ES_HOSTS}" ] + user => "${QS_ES_USERNAME}" + password => "${QS_ES_PASSWORD}" + cacert => "${QS_ES_SSL_CERTIFICATE_AUTHORITY}" } } services: - name: beats service: spec: - type: ClusterIP + type: NodePort ports: - port: 5044 name: "filebeat" @@ -133,7 +133,7 @@ spec: count: 1 elasticsearchRefs: - name: quickstart - clusterName: quickstart + clusterName: qs config: pipeline.workers: 4 <1> log.level: debug @@ -153,7 +153,7 @@ spec: count: 1 elasticsearchRefs: - name: quickstart - clusterName: quickstart + clusterName: qs configRef: secretName: quickstart-config --- @@ -183,7 +183,7 @@ spec: version: {version} count: 1 elasticsearchRefs: - - clusterName: quickstart + - clusterName: qs name: quickstart pipelines: - pipeline.id: main @@ -195,10 +195,10 @@ spec: } output { elasticsearch { - hosts => [ "${QUICKSTART_ES_HOSTS}" ] - user => "${QUICKSTART_ES_USER}" - password => "${QUICKSTART_ES_PASSWORD}" - cacert => "${QUICKSTART_ES_CA_CERTS}" + hosts => [ "${QS_ES_HOSTS}" ] + user => "${QS_ES_USERNAME}" + password => "${QS_ES_PASSWORD}" + cacert => "${QS_ES_SSL_CERTIFICATE_AUTHORITY}" } } ---- @@ -214,7 +214,7 @@ spec: version: {version} count: 1 elasticsearchRefs: - - clusterName: quickstart + - clusterName: qs name: quickstart pipelinesRef: secretName: quickstart-pipeline @@ -234,10 +234,10 @@ stringData: } output { elasticsearch { - hosts => [ "${QUICKSTART_ES_HOSTS}" ] - user => "${QUICKSTART_ES_USER}" - password => "${QUICKSTART_ES_PASSWORD}" - cacert => "${QUICKSTART_ES_CA_CERTS}" + hosts => [ "${QS_ES_HOSTS}" ] + user => "${QS_ES_USERNAME}" + password => "${QS_ES_PASSWORD}" + cacert => "${QS_ES_SSL_CERTIFICATE_AUTHORITY}" } } @@ -257,7 +257,7 @@ spec: version: {version} count: 1 elasticsearchRefs: - - clusterName: quickstart + - clusterName: qs name: quickstart pipelines: - pipeline.id: main @@ -269,10 +269,10 @@ spec: } output { elasticsearch { - hosts => [ "${QUICKSTART_ES_HOSTS}" ] - user => "${QUICKSTART_ES_USER}" - password => "${QUICKSTART_ES_PASSWORD}" - cacert => "${QUICKSTART_ES_CA_CERTS}" + hosts => [ "${QS_ES_HOSTS}" ] + user => "${QS_ES_USERNAME}" + password => "${QS_ES_PASSWORD}" + cacert => "${QS_ES_SSL_CERTIFICATE_AUTHORITY}" } } ---- @@ -290,7 +290,7 @@ The environment variables have a fixed naming convention: `NORMALIZED_CLUSTERNAME_ES_HOSTS` `NORMALIZED_CLUSTERNAME_ES_USERNAME` `NORMALIZED_CLUSTERNAME_ES_PASSWORD` -`NORMALIZED_CLUSTERNAME_ES_CA_CERTS` +`NORMALIZED_CLUSTERNAME_ES_SSL_CERTIFICATE_AUTHORITY}` 
where NORMALIZED_CLUSTERNAME is the value taken from the `clusterName` field of the `elasticsearchRef` property, capitalized, and `-` transformed to `_` - eg, prod-es, would becomed PROD_ES. @@ -334,15 +334,15 @@ spec: output { elasticsearch { <4> hosts => [ "${PROD_ES_ES_HOSTS}" ] - user => "${PROD_ES_ES_USER}" + user => "${PROD_ES_ES_USERNAME}" password => "${PROD_ES_ES_PASSWORD}" - cacert => "${PROD_ES_ES_CA_CERTS}" + cacert => "${PROD_ES_ES_SSL_CERTIFICATE_AUTHORITY}" } elasticsearch { <4> hosts => [ "${QA_ES_ES_HOSTS}" ] - user => "${QA_ES_ES_USER}" + user => "${QA_ES_ES_USERNAME}" password => "${QA_ES_ES_PASSWORD}" - cacert => "${QA_ES_ES_CA_CERTS}" + cacert => "${QA_ES_ES_SSL_CERTIFICATE_AUTHORITY}" } } From add2f2cb1dea8127146906f1eed1d61a660f1af4 Mon Sep 17 00:00:00 2001 From: Rob Bavey Date: Tue, 25 Apr 2023 10:58:37 -0400 Subject: [PATCH 19/26] Fix recipes --- config/recipes/logstash/logstash-eck.yaml | 11 ++-- config/recipes/logstash/logstash-es-role.yaml | 16 ++--- .../recipes/logstash/logstash-monitored.yaml | 13 ++-- config/recipes/logstash/logstash-multi.yaml | 61 ++++++++++--------- .../logstash/logstash-pipelinesecret.yaml | 13 ++-- .../logstash/logstash-pipelinevolume.yaml | 17 ++++-- .../logstash.asciidoc | 14 ++--- 7 files changed, 81 insertions(+), 64 deletions(-) diff --git a/config/recipes/logstash/logstash-eck.yaml b/config/recipes/logstash/logstash-eck.yaml index b3c9a32c9b..d24dfbe8a6 100644 --- a/config/recipes/logstash/logstash-eck.yaml +++ b/config/recipes/logstash/logstash-eck.yaml @@ -85,7 +85,7 @@ spec: count: 1 version: 8.7.0 elasticsearchRefs: - - clusterName: default + - clusterName: eck name: elasticsearch pipelines: - pipeline.id: main @@ -106,10 +106,11 @@ spec: } output { elasticsearch { - hosts => [ "${DEFAULT_ELASTICSEARCH_HOSTS}" ] - user => "${DEFAULT_ELASTICSEARCH_USER}" - password => "${DEFAULT_ELASTICSEARCH_PASSWORD}" - cacert => "${DEFAULT_ELASTICSEARCH_CA_CERTS}" + hosts => [ "${ECK_ES_HOSTS}" ] + user => "${ECK_ES_USER}" + password => "${ECK_ES_PASSWORD}" + cacert => "${ECK_ES_SSL_CERTIFICATE_AUTHORITY}" + } } services: - name: beats diff --git a/config/recipes/logstash/logstash-es-role.yaml b/config/recipes/logstash/logstash-es-role.yaml index 06d1a2c64d..3c9639169a 100644 --- a/config/recipes/logstash/logstash-es-role.yaml +++ b/config/recipes/logstash/logstash-es-role.yaml @@ -13,7 +13,7 @@ stringData: apiVersion: elasticsearch.k8s.elastic.co/v1 kind: Elasticsearch metadata: - name: elasticsearch-sample + name: elasticsearch spec: version: 8.7.0 auth: @@ -28,24 +28,24 @@ spec: apiVersion: logstash.k8s.elastic.co/v1alpha1 kind: Logstash metadata: - name: logstash-sample + name: logstash spec: count: 1 version: 8.7.0 elasticsearchRefs: - - name: elasticsearch-sample - clusterName: sample + - name: elasticsearch + clusterName: eck pipelines: - pipeline.id: main config.string: | input { exec { command => "uptime" interval => 10 } } output { elasticsearch { - hosts => [ "${SAMPLE_ES_HOSTS}" ] + hosts => [ "${ECK_ES_HOSTS}" ] ssl => true - cacert => "${SAMPLE_ES_SSL_CERTIFICATE_AUTHORITY}" - user => "${SAMPLE_ES_USERNAME}" - password => "${SAMPLE_ES_PASSWORD}" + cacert => "${ECK_ES_SSL_CERTIFICATE_AUTHORITY}" + user => "${ECK_ES_USER}" + password => "${ECK_ES_PASSWORD}" index => "my-index" data_stream => false ilm_enabled => false diff --git a/config/recipes/logstash/logstash-monitored.yaml b/config/recipes/logstash/logstash-monitored.yaml index 79c2ed1977..8d57409265 100644 --- a/config/recipes/logstash/logstash-monitored.yaml +++ 
b/config/recipes/logstash/logstash-monitored.yaml @@ -84,9 +84,9 @@ metadata: spec: count: 1 version: 8.7.0 - # elasticsearchRefs: - # - clusterName: eck - # name: elasticsearch + elasticsearchRefs: + - clusterName: eck + name: elasticsearch monitoring: metrics: elasticsearchRefs: @@ -109,7 +109,12 @@ spec: } } output { - stdout { codec => rubydebug } + elasticsearch { + hosts => [ "${ECK_ES_HOSTS}" ] + user => "${ECK_ES_USER}" + password => "${ECK_ES_PASSWORD}" + cacert => "${ECK_ES_SSL_CERTIFICATE_AUTHORITY}" + } } services: - name: beats diff --git a/config/recipes/logstash/logstash-multi.yaml b/config/recipes/logstash/logstash-multi.yaml index 26feaddce3..e77b6c3feb 100644 --- a/config/recipes/logstash/logstash-multi.yaml +++ b/config/recipes/logstash/logstash-multi.yaml @@ -122,12 +122,12 @@ metadata: spec: count: 1 version: 8.7.0 - elasticsearchRefs: - - clusterName: prod-es - name: production - - clusterName: qa-es - name: qa - namespace: qa + elasticsearchRefs: + - clusterName: prod-es + name: production + - clusterName: qa-es + name: qa + namespace: qa monitoring: metrics: elasticsearchRefs: @@ -157,35 +157,36 @@ spec: send_to => 'qa' } } - - pipeline.id: production - config.string: | - input { - pipeline { - address => 'prod' - } + - pipeline.id: production + config.string: | + input { + pipeline { + address => 'prod' } - output { - elasticsearch { + } + output { + elasticsearch { hosts => [ "${PROD_ES_ES_HOSTS}" ] user => "${PROD_ES_ES_USER}" password => "${PROD_ES_ES_PASSWORD}" - cacert => "${PROD_ES_ES_CA_CERTS}" + cacert => "${PROD_ES_ES_SSL_CERTIFICATE_AUTHORITY}" + } + } + - pipeline.id: qa + config.string: | + input { + pipeline { + address => 'qa' } - - pipeline.id: qa - config.string: | - input { - pipeline { - address => 'qa' - } - } - output { - elasticsearch { - hosts => [ "${QA_ES_ES_HOSTS}" ] - user => "${QA_ES_ES_USER}" - password => "${QA_ES_ES_PASSWORD}" - cacert => "${QA_ES_ES_CA_CERTS}" - } - } + } + output { + elasticsearch { + hosts => [ "${QA_ES_ES_HOSTS}" ] + user => "${QA_ES_ES_USER}" + password => "${QA_ES_ES_PASSWORD}" + cacert => "${QA_ES_ES_SSL_CERTIFICATE_AUTHORITY}" + } + } services: - name: beats service: diff --git a/config/recipes/logstash/logstash-pipelinesecret.yaml b/config/recipes/logstash/logstash-pipelinesecret.yaml index a6e6e65c03..c2ab283ede 100644 --- a/config/recipes/logstash/logstash-pipelinesecret.yaml +++ b/config/recipes/logstash/logstash-pipelinesecret.yaml @@ -84,9 +84,9 @@ metadata: spec: count: 1 version: 8.7.0 -# elasticsearchRefs: -# - clusterName: eck -# name: elasticsearch + elasticsearchRefs: + - clusterName: eck + name: elasticsearch pipelinesRef: secretName: logstash-pipeline services: @@ -125,5 +125,10 @@ stringData: } } output { - stdout { codec => rubydebug } + elasticsearch { + hosts => [ "${ECK_ES_HOSTS}" ] + user => "${ECK_ES_USER}" + password => "${ECK_ES_PASSWORD}" + cacert => "${ECK_ES_SSL_CERTIFICATE_AUTHORITY}" + } } diff --git a/config/recipes/logstash/logstash-pipelinevolume.yaml b/config/recipes/logstash/logstash-pipelinevolume.yaml index 7f162eb635..c4ef6efd3a 100644 --- a/config/recipes/logstash/logstash-pipelinevolume.yaml +++ b/config/recipes/logstash/logstash-pipelinevolume.yaml @@ -84,9 +84,9 @@ metadata: spec: count: 1 version: 8.7.0 -# elasticsearchRefs: -# - clusterName: eck -# name: elasticsearch + elasticsearchRefs: + - clusterName: eck + name: elasticsearch pipelines: - pipeline.id: main path.config: /usr/share/logstash/config/pipelines @@ -137,6 +137,11 @@ stringData: } } output.conf: |- - 
output { - stdout { codec => rubydebug } - } \ No newline at end of file + output { + elasticsearch { + hosts => [ "${ECK_ES_HOSTS}" ] + user => "${ECK_ES_USER}" + password => "${ECK_ES_PASSWORD}" + cacert => "${ECK_ES_SSL_CERTIFICATE_AUTHORITY}" + } + } diff --git a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc index db4516f0a7..01fd3d23c1 100644 --- a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc @@ -56,7 +56,7 @@ spec: output { elasticsearch { hosts => [ "${QS_ES_HOSTS}" ] - user => "${QS_ES_USERNAME}" + user => "${QS_ES_USER}" password => "${QS_ES_PASSWORD}" cacert => "${QS_ES_SSL_CERTIFICATE_AUTHORITY}" } @@ -196,7 +196,7 @@ spec: output { elasticsearch { hosts => [ "${QS_ES_HOSTS}" ] - user => "${QS_ES_USERNAME}" + user => "${QS_ES_USER}" password => "${QS_ES_PASSWORD}" cacert => "${QS_ES_SSL_CERTIFICATE_AUTHORITY}" } @@ -235,7 +235,7 @@ stringData: output { elasticsearch { hosts => [ "${QS_ES_HOSTS}" ] - user => "${QS_ES_USERNAME}" + user => "${QS_ES_USER}" password => "${QS_ES_PASSWORD}" cacert => "${QS_ES_SSL_CERTIFICATE_AUTHORITY}" } @@ -270,7 +270,7 @@ spec: output { elasticsearch { hosts => [ "${QS_ES_HOSTS}" ] - user => "${QS_ES_USERNAME}" + user => "${QS_ES_USER}" password => "${QS_ES_PASSWORD}" cacert => "${QS_ES_SSL_CERTIFICATE_AUTHORITY}" } @@ -288,7 +288,7 @@ The `spec.elasticsearchRefs` section provides a mechanism to help configure Logs In order to use `elasticsearchRefs` in a logstash pipeline, the logstash operator will create the necessary resources from the associated elasticsearch, and provide environment variables to allow these resources to be accessed from pipeline configuration, and will be replaced at runtime with the appropriate values. The environment variables have a fixed naming convention: `NORMALIZED_CLUSTERNAME_ES_HOSTS` -`NORMALIZED_CLUSTERNAME_ES_USERNAME` +`NORMALIZED_CLUSTERNAME_ES_USER` `NORMALIZED_CLUSTERNAME_ES_PASSWORD` `NORMALIZED_CLUSTERNAME_ES_SSL_CERTIFICATE_AUTHORITY}` @@ -334,13 +334,13 @@ spec: output { elasticsearch { <4> hosts => [ "${PROD_ES_ES_HOSTS}" ] - user => "${PROD_ES_ES_USERNAME}" + user => "${PROD_ES_ES_USER}" password => "${PROD_ES_ES_PASSWORD}" cacert => "${PROD_ES_ES_SSL_CERTIFICATE_AUTHORITY}" } elasticsearch { <4> hosts => [ "${QA_ES_ES_HOSTS}" ] - user => "${QA_ES_ES_USERNAME}" + user => "${QA_ES_ES_USER}" password => "${QA_ES_ES_PASSWORD}" cacert => "${QA_ES_ES_SSL_CERTIFICATE_AUTHORITY}" } From 7aa2a6c01ba768e586e9599dcbeae2be09220668 Mon Sep 17 00:00:00 2001 From: Rob Bavey Date: Tue, 25 Apr 2023 11:02:16 -0400 Subject: [PATCH 20/26] Add intro to elasticsearchref example --- .../orchestrating-elastic-stack-applications/logstash.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc index 01fd3d23c1..e5b0645292 100644 --- a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc @@ -308,6 +308,9 @@ NOTE: The logstash ECK operator will create a user called `eck_logstash_user_rol ``` The permissions for this user <<{p}-users-and-roles,can be updated>> to include more indices if the elasticsearch plugin is expected to use indices other than the default. 
See <<{p}-logstash-configuration-examples, logstash configuration examples>> for a sample configuration that creates a user to write to a custom index. +The following example demonstrates how to create a Logstash deployment that connects to +different elasticsearch instances, one of which is in a separate namespace: + [source,yaml,subs="attributes,+macros,callouts"] ---- apiVersion: logstash.k8s.elastic.co/v1alpha1 From 5872086cbf57731af40bb9e41b343591e3c38882 Mon Sep 17 00:00:00 2001 From: Rob Bavey Date: Wed, 26 Apr 2023 11:00:06 -0400 Subject: [PATCH 21/26] Remove duplicate recipe reference. --- .../logstash.asciidoc | 9 --------- 1 file changed, 9 deletions(-) diff --git a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc index e5b0645292..4668f4f441 100644 --- a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc @@ -437,15 +437,6 @@ This section contains manifests that illustrate common use cases, and can be you CAUTION: The examples in this section are for illustration purposes only and should not be considered to be production-ready. Some of these examples use the `node.store.allow_mmap: false` setting on Elasticsearch which has performance implications and should be tuned for production workloads, as described in <<{p}-virtual-memory>>. -=== Single Pipeline defined in CRD - -[source,sh,subs="attributes"] ----- -kubectl apply -f {logstash_recipes}/logstash-eck.yaml ----- - -Deploys Logstash with a single pipeline defined in the CRD - === Single Pipeline defined in CRD [source,sh,subs="attributes"] From 6ceb4f9dcf35626d5a1722c64dd43518d931bbd9 Mon Sep 17 00:00:00 2001 From: Arianna Laudazzi Date: Fri, 28 Apr 2023 13:08:03 +0200 Subject: [PATCH 22/26] Add the new file to the landing page --- .../orchestrating-elastic-stack-applications.asciidoc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/orchestrating-elastic-stack-applications/orchestrating-elastic-stack-applications.asciidoc b/docs/orchestrating-elastic-stack-applications/orchestrating-elastic-stack-applications.asciidoc index 4e6a59b4c7..103430e726 100644 --- a/docs/orchestrating-elastic-stack-applications/orchestrating-elastic-stack-applications.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/orchestrating-elastic-stack-applications.asciidoc @@ -17,6 +17,7 @@ endif::[] - <<{p}-maps>> - <<{p}-enterprise-search>> - <<{p}-beat>> +- <<{p}-logstash>> - <<{p}-stack-helm-chart>> - <<{p}-recipes>> - <<{p}-securing-stack>> @@ -37,6 +38,7 @@ include::agent-fleet.asciidoc[leveloffset=+1] include::maps.asciidoc[leveloffset=+1] include::enterprise-search.asciidoc[leveloffset=+1] include::beat.asciidoc[leveloffset=+1] +include::logstash.asciidoc[leveloffset=+1] include::stack-helm-chart.asciidoc[leveloffset=+1] include::recipes.asciidoc[leveloffset=+1] include::securing-stack.asciidoc[leveloffset=+1] From e98acfe52c567eae7791556676ee26cc47cd5941 Mon Sep 17 00:00:00 2001 From: Rob Bavey Date: Fri, 28 Apr 2023 11:45:19 -0400 Subject: [PATCH 23/26] Apply suggestions from code review Co-authored-by: Thibault Richard Co-authored-by: Arianna Laudazzi <46651782+alaudazzi@users.noreply.github.com> Co-authored-by: Peter Brachwitz --- config/recipes/logstash/README.asciidoc | 8 +-- config/recipes/logstash/logstash-es-role.yaml | 2 +- .../logstash/logstash-pipelinevolume.yaml | 28 +++++------ .../advanced-topics/stack-monitoring.asciidoc | 7 +-- .../logstash.asciidoc | 
50 +++++++++---------- 5 files changed, 46 insertions(+), 49 deletions(-) diff --git a/config/recipes/logstash/README.asciidoc b/config/recipes/logstash/README.asciidoc index 2d11f752c2..b8b33795be 100644 --- a/config/recipes/logstash/README.asciidoc +++ b/config/recipes/logstash/README.asciidoc @@ -8,19 +8,19 @@ endif::[] = Using Logstash with ECK -These recipes demonstrate how to run Logstash, Elasticsearch, Kibana and Filebeat deployed via ECK, using the link:https://www.elastic.co/guide/en/logstash/current/advanced-pipeline.html[Logstash log parsing example] as a starting point +These recipes demonstrate how to run Logstash, Elasticsearch, Kibana and Filebeat deployed via ECK, using the link:https://www.elastic.co/guide/en/logstash/current/advanced-pipeline.html[Logstash log parsing example] as a starting point. ===== Inline Pipeline usage - `logstash-eck.yaml` -Deploys Logstash with the pipeline defined inline in the CRD +Deploys Logstash with the pipeline defined inline in the CRD. ===== Pipeline as secret - `logstash-pipelinesecret.yaml` -Deploys Logstash with the pipeline defined in a secret and referred to via `pipelinesRef` +Deploys Logstash with the pipeline defined in a Secret and referred to via `pipelinesRef`. ===== Pipeline as mounted volume - `logstash-pipelinevolume.yaml` -Deploys Logstash with the pipeline details defined in the CRD, and the pipeline itself mounted as a volume +Deploys Logstash with the pipeline details defined in the CRD, and the pipeline itself mounted as a volume. ===== Logstash with Stack Monitoring - `logstash-monitored.yaml` diff --git a/config/recipes/logstash/logstash-es-role.yaml b/config/recipes/logstash/logstash-es-role.yaml index 3c9639169a..da76b7935f 100644 --- a/config/recipes/logstash/logstash-es-role.yaml +++ b/config/recipes/logstash/logstash-es-role.yaml @@ -21,7 +21,7 @@ spec: - secretName: my-roles-secret nodeSets: - name: default - count: 2 + count: 3 config: node.store.allow_mmap: false --- diff --git a/config/recipes/logstash/logstash-pipelinevolume.yaml b/config/recipes/logstash/logstash-pipelinevolume.yaml index c4ef6efd3a..7b3db7cfe1 100644 --- a/config/recipes/logstash/logstash-pipelinevolume.yaml +++ b/config/recipes/logstash/logstash-pipelinevolume.yaml @@ -121,21 +121,21 @@ metadata: app.kubernetes.io/name: eck-logstash stringData: input.conf: |- - input { - beats { - port => 5044 - } - } + input { + beats { + port => 5044 + } + } filters.conf: |- - filter { - grok { - match => { "message" => "%{HTTPD_COMMONLOG}"} - } - geoip { - source => "[source][address]" - target => "[source]" - } - } + filter { + grok { + match => { "message" => "%{HTTPD_COMMONLOG}"} + } + geoip { + source => "[source][address]" + target => "[source]" + } + } output.conf: |- output { elasticsearch { diff --git a/docs/advanced-topics/stack-monitoring.asciidoc b/docs/advanced-topics/stack-monitoring.asciidoc index a43c22a8f2..c370dfb292 100644 --- a/docs/advanced-topics/stack-monitoring.asciidoc +++ b/docs/advanced-topics/stack-monitoring.asciidoc @@ -92,10 +92,7 @@ spec: namespace: observability <4> ---- -<1> The use of `namespace` is optional if the monitoring Elasticsearch cluster and the monitored Elasticsearch cluster are running in the same namespace. -<2> The use of `namespace` is optional if the Elasticsearch cluster and the Kibana instance are running in the same namespace. -<3> The use of `namespace` is optional if the Elasticsearch cluster and the Beats instance are running in the same namespace. 
-<4> The use of `namespace` is optional if the Elasticsearch cluster and the Logstash instances are running in the same namespace. +<1> The use of `namespace` is optional if the monitoring Elasticsearch cluster and the monitored Elastic Stack resource are running in the same namespace. NOTE: You can configure an Elasticsearch cluster to monitor itself. @@ -103,7 +100,7 @@ NOTE: If Stack Monitoring is configured for a Beat, but the corresponding Elasti NOTE: If Logs Stack Monitoring is configured for a Beat, and custom container arguments (`podTemplate.spec.containers[].args`) include `-e`, which enables logging to stderr and disables log file output, this argument will be removed from the Pod to allow the Filebeat sidecar to consume the Beat's log files. -NOTE: Stack Monitoring for Logstash on ECK is only available for Logstash versions 8.7.0 and above. +NOTE: Stack Monitoring for Logstash on ECK is only available for Logstash versions 8.7.0 and later. IMPORTANT: The monitoring cluster must be managed by ECK in the same Kubernetes cluster as the monitored one. diff --git a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc index 4668f4f441..8b01db7daf 100644 --- a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc @@ -30,7 +30,7 @@ NOTE: Running Logstash on ECK is compatible only with Logstash 8.7+. [id="{p}-logstash-quickstart"] == Quickstart -Add the following specification to create a minimal logstash deployment that will listen to a beats agent or elastic agent configured to send to logstash on port 5044, create the service and write the output to an Elasticsearch cluster named `quickstart`, created in the link:k8s-quickstart.html[Elasticsearch quickstart]. +Add the following specification to create a minimal Logstash deployment that will listen to a Beats agent or Elastic Agent configured to send to Logstash on port 5044, create the service and write the output to an Elasticsearch cluster named `quickstart`, created in the link:k8s-quickstart.html[Elasticsearch quickstart]. [source,yaml,subs="attributes,+macros,callouts"] ---- @@ -102,7 +102,7 @@ NAME READY STATUS RESTARTS AGE quickstart-ls-0 1/1 Running 0 91s ---- -. Access logs for a pod. +. Access logs for a Pod. + [source,sh] ---- @@ -120,7 +120,7 @@ You can upgrade the Logstash version or change settings by editing the YAML spec [id="{p}-logstash-configuring-logstash"] === Logstash configuration -The Logstash configuration (equivalent to logstash.yml) is defined in the `spec.config` section: +The Logstash configuration (equivalent to `logstash.yml`) is defined in the `spec.config` section: [source,yaml,subs="attributes,+macros,callouts"] ---- @@ -134,11 +134,11 @@ spec: elasticsearchRefs: - name: quickstart clusterName: qs - config: - pipeline.workers: 4 <1> + config: <1> + pipeline.workers: 4 log.level: debug ---- -<1> Customize logstash configuration using logstash.yml settings here +<1> Customize Logstash configuration using `logstash.yml` settings here Alternatively, it can be provided through a Secret specified in the `spec.configRef` section. 
The Secret must have an `logstash.yml` entry with this configuration: @@ -171,7 +171,7 @@ stringData: [id="{p}-logstash-pipelines"] === Configuring Logstash Pipelines -Logstash pipelines (equivalent to pipelines.yml) are defined in the `spec.pipelines` section: +Logstash pipelines (equivalent to `pipelines.yml`) are defined in the `spec.pipelines` section: [source,yaml,subs="attributes,+macros,callouts"] ---- @@ -245,7 +245,7 @@ stringData: Logstash on ECK will* support all options present in `pipelines.yml`, including settings to update the number of workers, and the size of the batch that the pipeline will process. This also includes using `path.config` to point to volumes - mounted on the logstash container: + mounted on the Logstash container: [source,yaml,subs="attributes,+macros,callouts"] ---- @@ -277,7 +277,7 @@ spec: } ---- -NOTE: Persistent Queues and Dead Letter Queues will be supported in a later release, but are not currently supported. +NOTE: Persistent Queues and Dead Letter Queues are not currently supported. [id="{p}-logstash-pipelines-es"] @@ -285,7 +285,7 @@ NOTE: Persistent Queues and Dead Letter Queues will be supported in a later rele The `spec.elasticsearchRefs` section provides a mechanism to help configure Logstash to estabish a secured connection to one or more managed Elasticsearch clusters. By default, it targets all nodes in your cluster. If you want to direct traffic to specific nodes of your Elasticsearch cluster, refer to <<{p}-traffic-splitting>> for more information and examples. -In order to use `elasticsearchRefs` in a logstash pipeline, the logstash operator will create the necessary resources from the associated elasticsearch, and provide environment variables to allow these resources to be accessed from pipeline configuration, and will be replaced at runtime with the appropriate values. +To use `elasticsearchRefs` in a Logstash pipeline, the Logstash operator will create the necessary resources from the associated Elasticsearch, and provides environment variables to allow these resources to be accessed from the pipeline configuration, and will be replaced at runtime with the appropriate values. The environment variables have a fixed naming convention: `NORMALIZED_CLUSTERNAME_ES_HOSTS` `NORMALIZED_CLUSTERNAME_ES_USER` @@ -296,7 +296,7 @@ where NORMALIZED_CLUSTERNAME is the value taken from the `clusterName` field of NOTE: The `clusterName` value should be unique across namespaces. -NOTE: The logstash ECK operator will create a user called `eck_logstash_user_role` when an `elasticsearchRef` is specified. This user has the following permissions: +NOTE: The Logstash ECK operator will create a user called `eck_logstash_user_role` when an `elasticsearchRef` is specified. This user has the following permissions: ``` "cluster": ["monitor", "manage_ilm", "read_ilm", "manage_logstash_pipelines", "manage_index_templates", "cluster:admin/ingest/pipeline/get",], "indices": [ @@ -306,10 +306,10 @@ NOTE: The logstash ECK operator will create a user called `eck_logstash_user_rol } ``` -The permissions for this user <<{p}-users-and-roles,can be updated>> to include more indices if the elasticsearch plugin is expected to use indices other than the default. See <<{p}-logstash-configuration-examples, logstash configuration examples>> for a sample configuration that creates a user to write to a custom index. 
+The permissions for this user <<{p}-users-and-roles,can be updated>> to include more indices if the Elasticsearch plugin is expected to use indices other than the default. See <<{p}-logstash-configuration-examples, Logstash configuration examples>> for a sample configuration that creates a user to write to a custom index. The following example demonstrates how to create a Logstash deployment that connects to -different elasticsearch instances, one of which is in a separate namespace: +different Elasticsearch instances, one of which is in a separate namespace: [source,yaml,subs="attributes,+macros,callouts"] ---- @@ -351,17 +351,17 @@ spec: ---- -<1> Define Elasticsearch references in the CRD. This will create the appropriate secrets to store certificate details and the rest of the connection information, and create environment variables to allow them to be referred to in Logstash pipeline configurations. -<2> This refers to an Elasticsearch cluster residing in the same namespace as the logstash instances -<3> This refers to an Elasticsearch cluster residing in a different namespace to the logstash instance -<4> Elasticsearch output definitions - use the environment variables created by the Logstash operator when specifying an `ElasticsearchRef`. Note the use of "normalized" versions of the `clusterName` in the environment variables used to populate the relevant fields +<1> Define Elasticsearch references in the CRD. This will create the appropriate Secrets to store certificate details and the rest of the connection information, and create environment variables to allow them to be referred to in Logstash pipeline configurations. +<2> This refers to an Elasticsearch cluster residing in the same namespace as the Logstash instances. +<3> This refers to an Elasticsearch cluster residing in a different namespace to the Logstash instances. +<4> Elasticsearch output definitions - use the environment variables created by the Logstash operator when specifying an `ElasticsearchRef`. Note the use of "normalized" versions of the `clusterName` in the environment variables used to populate the relevant fields. [id="{p}-logstash-expose-services"] === Expose Services -By default, the Logstash operator creates a headless service for the metrics endpoint to enable metric collection by the metricbeat sidecar for stack monitoring: +By default, the Logstash operator creates a headless Service for the metrics endpoint to enable metric collection by the Metricbeat sidecar for Stack Monitoring: + [source,sh] @@ -465,7 +465,7 @@ kubectl apply -f {logstash_recipes}/logstash-pipelinevolume.yaml Deploys Logstash with a single pipeline defined in a secret, mounted as a volume, and referenced by `path.config` -=== Writing to a custom elasticsearch index +=== Writing to a custom Elasticsearch index [source,sh,subs="attributes"] ---- @@ -482,7 +482,7 @@ Deploys Logstash and Elasticsearch, and creates an updated version of the `eck_l kubectl apply -f {logstash_recipes}/logstash-monitored.yaml ---- -Deploys an Elasticsearch and Kibana monitoring cluster, and a Logstash that will send it's monitoring information to this cluster. You can view the stack monitoring information in the monitoring cluster's Kibana +Deploys an Elasticsearch and Kibana monitoring cluster, and a Logstash that will send its monitoring information to this cluster. 
You can view the stack monitoring information in the monitoring cluster's Kibana === Multiple pipelines/multiple es clusters @@ -522,11 +522,11 @@ spec: [id="{p}-logstash-scaling-logstash"] == Scaling Logstash -* The ability to scale Logstash is highly dependent on the pipeline configurations, and the plugins used in those pipelines. Not all logstash deployments can be scaled horizontally by increasing the number of Logstash pods defined in the Logstash resource - depending on the plugins being used, this could result in data loss/duplication of data or pods running idle unable to be utilized. +* The ability to scale Logstash is highly dependent on the pipeline configurations, and the plugins used in those pipelines. Not all Logstash deployments can be scaled horizontally by increasing the number of Logstash Pods defined in the Logstash resource - depending on the plugins being used, this could result in data loss/duplication of data or Pods running idle unable to be utilized. * Particular care should be taken with plugins that: ** Retrieve data from external sources. -*** Plugins that retrieve data from external sources, and require some level of coordination between nodes to split up work, are not good candidates for scaling horizontally, and would likely produce some data duplication. These are plugins such as the JDBC input plugin, which has no automatic way to split queries across logstash instances, or the S3 input, which has no way to split which buckets to read across logstash instances. -*** Plugins that retrieve data from external sources, where work is distributed externally to logstash, but may impose their own limits. These are plugins like the kafka input, or azure event hubs, where the parallelism is limited by the number of partitions vs the number of consumers. In cases like this, extra logstash pods may be idle if the number of consumer threads multiplied by the number of pods is greater than the number of partitions. +*** Plugins that retrieve data from external sources, and require some level of coordination between nodes to split up work, are not good candidates for scaling horizontally, and would likely produce some data duplication. These are plugins such as the JDBC input plugin, which has no automatic way to split queries across Logstash instances, or the S3 input, which has no way to split which buckets to read across Logstash instances. +*** Plugins that retrieve data from external sources, where work is distributed externally to Logstash, but may impose their own limits. These are plugins like the Kafka input, or Azure event hubs, where the parallelism is limited by the number of partitions vs the number of consumers. In cases like this, extra Logstash Pods may be idle if the number of consumer threads multiplied by the number of Pods is greater than the number of partitions. ** Plugins that require events to be received in order. *** Certain plugins, such as the aggregate filter, expect events to be received in strict order to run without error or data loss. Any plugin that requires the number of pipeline workers to be `1` will also have issues when horizontal scaling is used. 
* If the pipeline does not contain any such plugin, the number of Logstash instances can be increased by setting the `count` property in the Logstash resource: @@ -553,10 +553,10 @@ Note that this release is a technical preview, is still under active development * No integrated support for persistence ** The operator provides no integrated support for persistence, including PQ, DLQ support and plugins that may require persistent storage to keep track of state - any persistence should be added manually. * `ElasticsearchRef` implementation in plugins is in preview mode -** Adding elasticsearch to plugin definitions requires the use of environment variables populated by the Logstash operator, which may change in future versions of the logstash operator. +** Adding Elasticsearch to plugin definitions requires the use of environment variables populated by the Logstash operator, which may change in future versions of the Logstash operator. -* Limited support for plugins. Note that this is not an exhaustive list, and plugins outside of the logstash plugin matrix have not been considered for this list: +* Limited support for plugins. Note that this is not an exhaustive list, and plugins outside of the Logstash plugin matrix have not been considered for this list: ** Input plugins: *** The following plugins are supported: logstash-input-azure_event_hubs, logstash-input-beats, logstash-input-elastic_agent, logstash-input-kafka, logstash-input-tcp, logstash-input-http, logstash-input-udp ** Filter Plugins: From 301a554bfef27a3c9ead9b03cd92ac01f5cb0412 Mon Sep 17 00:00:00 2001 From: Rob Bavey Date: Fri, 28 Apr 2023 11:53:15 -0400 Subject: [PATCH 24/26] Tidy up recipes after code review comments --- config/recipes/logstash/README.asciidoc | 4 +- config/recipes/logstash/logstash-eck.yaml | 16 -- .../recipes/logstash/logstash-monitored.yaml | 20 -- config/recipes/logstash/logstash-multi.yaml | 28 +-- ....yaml => logstash-pipeline-as-secret.yaml} | 18 -- ....yaml => logstash-pipeline-as-volume.yaml} | 18 -- config/recipes/logstash/logstash.yaml | 189 ------------------ .../logstash.asciidoc | 4 +- 8 files changed, 5 insertions(+), 292 deletions(-) rename config/recipes/logstash/{logstash-pipelinesecret.yaml => logstash-pipeline-as-secret.yaml} (83%) rename config/recipes/logstash/{logstash-pipelinevolume.yaml => logstash-pipeline-as-volume.yaml} (84%) delete mode 100644 config/recipes/logstash/logstash.yaml diff --git a/config/recipes/logstash/README.asciidoc b/config/recipes/logstash/README.asciidoc index b8b33795be..63ef92a723 100644 --- a/config/recipes/logstash/README.asciidoc +++ b/config/recipes/logstash/README.asciidoc @@ -14,11 +14,11 @@ These recipes demonstrate how to run Logstash, Elasticsearch, Kibana and Filebea Deploys Logstash with the pipeline defined inline in the CRD. -===== Pipeline as secret - `logstash-pipelinesecret.yaml` +===== Pipeline as secret - `logstash-pipeline-as-secret.yaml` Deploys Logstash with the pipeline defined in a Secret and referred to via `pipelinesRef`. -===== Pipeline as mounted volume - `logstash-pipelinevolume.yaml` +===== Pipeline as mounted volume - `logstash-pipeline-as-volume.yaml` Deploys Logstash with the pipeline details defined in the CRD, and the pipeline itself mounted as a volume. 
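NOTE: For readers scanning the recipe changes below, this is a condensed, illustrative sketch of the "pipeline as mounted volume" pattern that `logstash-pipeline-as-volume.yaml` uses: `path.config` in the Logstash CRD points at a directory, and the pipeline files are mounted into that directory from the `logstash-pipeline` Secret. The `podTemplate` volume wiring shown here is an assumption about how such a Secret would typically be mounted, not an excerpt from the recipe itself.

[source,yaml]
----
apiVersion: logstash.k8s.elastic.co/v1alpha1
kind: Logstash
metadata:
  name: logstash
spec:
  count: 1
  version: 8.7.0
  pipelines:
    - pipeline.id: main
      # Logstash reads every pipeline file found in this directory
      path.config: /usr/share/logstash/config/pipelines
  podTemplate:
    spec:
      containers:
        - name: logstash
          volumeMounts:
            - name: pipeline-volume
              mountPath: /usr/share/logstash/config/pipelines
              readOnly: true
      volumes:
        - name: pipeline-volume
          secret:
            # Secret holding input.conf / filters.conf / output.conf
            secretName: logstash-pipeline
----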
diff --git a/config/recipes/logstash/logstash-eck.yaml b/config/recipes/logstash/logstash-eck.yaml index d24dfbe8a6..1bb758e37e 100644 --- a/config/recipes/logstash/logstash-eck.yaml +++ b/config/recipes/logstash/logstash-eck.yaml @@ -3,9 +3,6 @@ apiVersion: elasticsearch.k8s.elastic.co/v1 kind: Elasticsearch metadata: name: elasticsearch - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: elasticsearch spec: version: 8.7.0 nodeSets: @@ -19,9 +16,6 @@ apiVersion: kibana.k8s.elastic.co/v1 kind: Kibana metadata: name: kibana - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: kibana spec: version: 8.7.0 count: 1 @@ -32,9 +26,6 @@ apiVersion: beat.k8s.elastic.co/v1beta1 kind: Beat metadata: name: filebeat - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: filebeat spec: type: filebeat version: 8.7.0 @@ -47,10 +38,6 @@ spec: hosts: ["logstash-ls-api.default.svc:5044"] deployment: podTemplate: - metadata: - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: filebeat spec: automountServiceAccountToken: true initContainers: @@ -78,9 +65,6 @@ apiVersion: logstash.k8s.elastic.co/v1alpha1 kind: Logstash metadata: name: logstash - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: logstash spec: count: 1 version: 8.7.0 diff --git a/config/recipes/logstash/logstash-monitored.yaml b/config/recipes/logstash/logstash-monitored.yaml index 8d57409265..255745cfc3 100644 --- a/config/recipes/logstash/logstash-monitored.yaml +++ b/config/recipes/logstash/logstash-monitored.yaml @@ -3,9 +3,6 @@ apiVersion: elasticsearch.k8s.elastic.co/v1 kind: Elasticsearch metadata: name: elasticsearch - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: elasticsearch spec: version: 8.7.0 nodeSets: @@ -19,9 +16,6 @@ apiVersion: kibana.k8s.elastic.co/v1 kind: Kibana metadata: name: kibana - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: kibana spec: version: 8.7.0 count: 1 @@ -32,9 +26,6 @@ apiVersion: beat.k8s.elastic.co/v1beta1 kind: Beat metadata: name: filebeat - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: filebeat spec: type: filebeat version: 8.7.0 @@ -47,10 +38,6 @@ spec: hosts: ["logstash-ls-api.default.svc:5044"] deployment: podTemplate: - metadata: - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: filebeat spec: automountServiceAccountToken: true initContainers: @@ -78,9 +65,6 @@ apiVersion: logstash.k8s.elastic.co/v1alpha1 kind: Logstash metadata: name: logstash - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: logstash spec: count: 1 version: 8.7.0 @@ -148,7 +132,3 @@ spec: count: 1 elasticsearchRef: name: elasticsearch-monitoring - config: - # enable the UI to reflect container level CPU usage, only displays info if CPU limits are set on the monitored ES cluster - # https://www.elastic.co/guide/en/kibana/current/monitoring-settings-kb.html - monitoring.ui.container.elasticsearch.enabled: true \ No newline at end of file diff --git a/config/recipes/logstash/logstash-multi.yaml b/config/recipes/logstash/logstash-multi.yaml index e77b6c3feb..191a633ad4 100644 --- a/config/recipes/logstash/logstash-multi.yaml +++ b/config/recipes/logstash/logstash-multi.yaml @@ -11,9 +11,6 @@ kind: Elasticsearch metadata: name: qa namespace: qa - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: elasticsearch spec: 
version: 8.7.0 nodeSets: @@ -28,9 +25,6 @@ kind: Kibana metadata: name: qa namespace: qa - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: kibana spec: version: 8.7.0 count: 1 @@ -41,9 +35,6 @@ apiVersion: elasticsearch.k8s.elastic.co/v1 kind: Elasticsearch metadata: name: production - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: elasticsearch spec: version: 8.7.0 nodeSets: @@ -57,9 +48,6 @@ apiVersion: kibana.k8s.elastic.co/v1 kind: Kibana metadata: name: production - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: kibana spec: version: 8.7.0 count: 1 @@ -70,9 +58,6 @@ apiVersion: beat.k8s.elastic.co/v1beta1 kind: Beat metadata: name: filebeat - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: filebeat spec: type: filebeat version: 8.7.0 @@ -85,10 +70,6 @@ spec: hosts: ["logstash-ls-api.default.svc:5044"] deployment: podTemplate: - metadata: - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: filebeat spec: automountServiceAccountToken: true initContainers: @@ -116,9 +97,6 @@ apiVersion: logstash.k8s.elastic.co/v1alpha1 kind: Logstash metadata: name: logstash - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: logstash spec: count: 1 version: 8.7.0 @@ -218,8 +196,4 @@ spec: version: 8.7.0 count: 1 elasticsearchRef: - name: elasticsearch-monitoring - config: - # enable the UI to reflect container level CPU usage, only displays info if CPU limits are set on the monitored ES cluster - # https://www.elastic.co/guide/en/kibana/current/monitoring-settings-kb.html - monitoring.ui.container.elasticsearch.enabled: true \ No newline at end of file + name: elasticsearch-monitoring \ No newline at end of file diff --git a/config/recipes/logstash/logstash-pipelinesecret.yaml b/config/recipes/logstash/logstash-pipeline-as-secret.yaml similarity index 83% rename from config/recipes/logstash/logstash-pipelinesecret.yaml rename to config/recipes/logstash/logstash-pipeline-as-secret.yaml index c2ab283ede..c0edfc28fa 100644 --- a/config/recipes/logstash/logstash-pipelinesecret.yaml +++ b/config/recipes/logstash/logstash-pipeline-as-secret.yaml @@ -3,9 +3,6 @@ apiVersion: elasticsearch.k8s.elastic.co/v1 kind: Elasticsearch metadata: name: elasticsearch - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: elasticsearch spec: version: 8.7.0 nodeSets: @@ -19,9 +16,6 @@ apiVersion: kibana.k8s.elastic.co/v1 kind: Kibana metadata: name: kibana - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: kibana spec: version: 8.7.0 count: 1 @@ -32,9 +26,6 @@ apiVersion: beat.k8s.elastic.co/v1beta1 kind: Beat metadata: name: filebeat - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: filebeat spec: type: filebeat version: 8.7.0 @@ -47,10 +38,6 @@ spec: hosts: ["logstash-ls-api.default.svc:5044"] deployment: podTemplate: - metadata: - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: filebeat spec: automountServiceAccountToken: true initContainers: @@ -78,9 +65,6 @@ apiVersion: logstash.k8s.elastic.co/v1alpha1 kind: Logstash metadata: name: logstash - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: logstash spec: count: 1 version: 8.7.0 @@ -104,8 +88,6 @@ apiVersion: v1 kind: Secret metadata: name: logstash-pipeline - labels: - app.kubernetes.io/name: eck-logstash stringData: pipelines.yml: |- - pipeline.id: 
main diff --git a/config/recipes/logstash/logstash-pipelinevolume.yaml b/config/recipes/logstash/logstash-pipeline-as-volume.yaml similarity index 84% rename from config/recipes/logstash/logstash-pipelinevolume.yaml rename to config/recipes/logstash/logstash-pipeline-as-volume.yaml index 7b3db7cfe1..52539e2d40 100644 --- a/config/recipes/logstash/logstash-pipelinevolume.yaml +++ b/config/recipes/logstash/logstash-pipeline-as-volume.yaml @@ -3,9 +3,6 @@ apiVersion: elasticsearch.k8s.elastic.co/v1 kind: Elasticsearch metadata: name: elasticsearch - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: elasticsearch spec: version: 8.7.0 nodeSets: @@ -19,9 +16,6 @@ apiVersion: kibana.k8s.elastic.co/v1 kind: Kibana metadata: name: kibana - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: kibana spec: version: 8.7.0 count: 1 @@ -32,9 +26,6 @@ apiVersion: beat.k8s.elastic.co/v1beta1 kind: Beat metadata: name: filebeat - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: filebeat spec: type: filebeat version: 8.7.0 @@ -47,10 +38,6 @@ spec: hosts: ["logstash-ls-api.default.svc:5044"] deployment: podTemplate: - metadata: - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: filebeat spec: automountServiceAccountToken: true initContainers: @@ -78,9 +65,6 @@ apiVersion: logstash.k8s.elastic.co/v1alpha1 kind: Logstash metadata: name: logstash - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: logstash spec: count: 1 version: 8.7.0 @@ -117,8 +101,6 @@ apiVersion: v1 kind: Secret metadata: name: logstash-pipeline - labels: - app.kubernetes.io/name: eck-logstash stringData: input.conf: |- input { diff --git a/config/recipes/logstash/logstash.yaml b/config/recipes/logstash/logstash.yaml deleted file mode 100644 index 8c41800efc..0000000000 --- a/config/recipes/logstash/logstash.yaml +++ /dev/null @@ -1,189 +0,0 @@ ---- -apiVersion: elasticsearch.k8s.elastic.co/v1 -kind: Elasticsearch -metadata: - name: elasticsearch - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: elasticsearch -spec: - version: 8.7.0 - nodeSets: - - name: default - count: 3 - config: - # This setting has performance implications. See the README for more details. 
- node.store.allow_mmap: false ---- -apiVersion: kibana.k8s.elastic.co/v1 -kind: Kibana -metadata: - name: kibana - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: kibana -spec: - version: 8.7.0 - count: 1 - elasticsearchRef: - name: elasticsearch ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: logstash-config - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: logstash -data: - logstash.yml: | - http.host: "0.0.0.0" - path.config: /usr/share/logstash/pipeline ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: logstash-pipeline - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: logstash -data: - logstash.conf: | - input { - beats { - port => 5044 - } - } - filter { - grok { - match => { "message" => "%{COMBINEDAPACHELOG}"} - } - geoip { - source => "clientip" - target => "clientgeo" - } - } - output { - elasticsearch { - hosts => [ "${ES_HOSTS}" ] - user => "${ES_USER}" - password => "${ES_PASSWORD}" - cacert => '/etc/logstash/certificates/ca.crt' - } - } ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: logstash - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: logstash -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: logstash - template: - metadata: - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: logstash - spec: - containers: - - name: logstash - image: docker.elastic.co/logstash/logstash:8.7.0 - ports: - - name: "tcp-beats" - containerPort: 5044 - env: - - name: ES_HOSTS - value: "https://elasticsearch-es-http.default.svc:9200" - - name: ES_USER - value: "elastic" - - name: ES_PASSWORD - valueFrom: - secretKeyRef: - name: elasticsearch-es-elastic-user - key: elastic - volumeMounts: - - name: config-volume - mountPath: /usr/share/logstash/config - - name: pipeline-volume - mountPath: /usr/share/logstash/pipeline - - name: ca-certs - mountPath: /etc/logstash/certificates - readOnly: true - volumes: - - name: config-volume - configMap: - name: logstash-config - - name: pipeline-volume - configMap: - name: logstash-pipeline - - name: ca-certs - secret: - secretName: elasticsearch-es-http-certs-public ---- -apiVersion: v1 -kind: Service -metadata: - name: logstash - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: logstash -spec: - ports: - - name: "tcp-beats" - port: 5044 - targetPort: 5044 - selector: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: logstash ---- -apiVersion: beat.k8s.elastic.co/v1beta1 -kind: Beat -metadata: - name: filebeat - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: filebeat -spec: - type: filebeat - version: 8.7.0 - config: - filebeat.inputs: - - type: log - paths: - - /data/logstash-tutorial.log - output.logstash: - hosts: ["logstash.default.svc:5044"] - deployment: - podTemplate: - metadata: - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: filebeat - spec: - automountServiceAccountToken: true - initContainers: - - name: download-tutorial - image: curlimages/curl - command: ["/bin/sh"] - args: ["-c", "curl -L https://download.elastic.co/demos/logstash/gettingstarted/logstash-tutorial.log.gz | gunzip -c > /data/logstash-tutorial.log"] - volumeMounts: - - name: data - mountPath: /data - containers: - - name: filebeat - volumeMounts: - - name: data - mountPath: /data - - name: beat-data - mountPath: 
/usr/share/filebeat/data - volumes: - - name: data - emptydir: {} - - name: beat-data - emptydir: {} diff --git a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc index 8b01db7daf..1ec2729a39 100644 --- a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc @@ -450,7 +450,7 @@ Deploys Logstash with a single pipeline defined in the CRD [source,sh,subs="attributes"] ---- -kubectl apply -f {logstash_recipes}/logstash-pipelinesecret.yaml +kubectl apply -f {logstash_recipes}/logstash-pipeline-as-secret.yaml ---- Deploys Logstash with a single pipeline defined in a secret, referenced by a `pipelineRef` @@ -459,7 +459,7 @@ Deploys Logstash with a single pipeline defined in a secret, referenced by a `pi [source,sh,subs="attributes"] ---- -kubectl apply -f {logstash_recipes}/logstash-pipelinevolume.yaml +kubectl apply -f {logstash_recipes}/logstash-pipeline-as-volume.yaml ---- Deploys Logstash with a single pipeline defined in a secret, mounted as a volume, and referenced by From 232d61b5ee6b0c9a18153a5f5a64f433829147f7 Mon Sep 17 00:00:00 2001 From: Rob Bavey Date: Fri, 28 Apr 2023 12:06:39 -0400 Subject: [PATCH 25/26] More code review suggestions --- .../logstash.asciidoc | 30 ++++++++++--------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc index 1ec2729a39..d63442f66a 100644 --- a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc @@ -73,7 +73,7 @@ spec: targetPort: 5044 EOF ---- -+ + Check <<{p}-logstash-configuration-examples>> for more ready-to-use manifests. . Check the status of Logstash @@ -103,7 +103,7 @@ quickstart-ls-0 1/1 Running 0 91s ---- . Access logs for a Pod. -+ + [source,sh] ---- kubectl logs -f quickstart-ls-0 @@ -243,7 +243,7 @@ stringData: ---- -Logstash on ECK will* support all options present in `pipelines.yml`, including settings to update the number of workers, and +Logstash on ECK will support all options present in `pipelines.yml`, including settings to update the number of workers, and the size of the batch that the pipeline will process. This also includes using `path.config` to point to volumes mounted on the Logstash container: @@ -294,7 +294,7 @@ The environment variables have a fixed naming convention: where NORMALIZED_CLUSTERNAME is the value taken from the `clusterName` field of the `elasticsearchRef` property, capitalized, and `-` transformed to `_` - eg, prod-es, would becomed PROD_ES. -NOTE: The `clusterName` value should be unique across namespaces. +NOTE: The `clusterName` value should be unique across all referenced Elasticsearches in the same Logstash spec. NOTE: The Logstash ECK operator will create a user called `eck_logstash_user_role` when an `elasticsearchRef` is specified. 
This user has the following permissions: ``` @@ -363,12 +363,12 @@ spec: By default, the Logstash operator creates a headless Service for the metrics endpoint to enable metric collection by the Metricbeat sidecar for Stack Monitoring: -+ + [source,sh] ---- kubectl get service quickstart-ls-api ---- -+ + [source,sh,subs="attributes"] ---- NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE @@ -522,14 +522,16 @@ spec: [id="{p}-logstash-scaling-logstash"] == Scaling Logstash -* The ability to scale Logstash is highly dependent on the pipeline configurations, and the plugins used in those pipelines. Not all Logstash deployments can be scaled horizontally by increasing the number of Logstash Pods defined in the Logstash resource - depending on the plugins being used, this could result in data loss/duplication of data or Pods running idle unable to be utilized. -* Particular care should be taken with plugins that: -** Retrieve data from external sources. -*** Plugins that retrieve data from external sources, and require some level of coordination between nodes to split up work, are not good candidates for scaling horizontally, and would likely produce some data duplication. These are plugins such as the JDBC input plugin, which has no automatic way to split queries across Logstash instances, or the S3 input, which has no way to split which buckets to read across Logstash instances. -*** Plugins that retrieve data from external sources, where work is distributed externally to Logstash, but may impose their own limits. These are plugins like the Kafka input, or Azure event hubs, where the parallelism is limited by the number of partitions vs the number of consumers. In cases like this, extra Logstash Pods may be idle if the number of consumer threads multiplied by the number of Pods is greater than the number of partitions. -** Plugins that require events to be received in order. -*** Certain plugins, such as the aggregate filter, expect events to be received in strict order to run without error or data loss. Any plugin that requires the number of pipeline workers to be `1` will also have issues when horizontal scaling is used. -* If the pipeline does not contain any such plugin, the number of Logstash instances can be increased by setting the `count` property in the Logstash resource: +The ability to scale Logstash is highly dependent on the pipeline configurations, and the plugins used in those pipelines. Not all Logstash deployments can be scaled horizontally by increasing the number of Logstash Pods defined in the Logstash resource - depending on the plugins being used, this could result in data loss/duplication of data or Pods running idle unable to be utilized. + +Particular care should be taken with plugins that: + +* Retrieve data from external sources. +** Plugins that retrieve data from external sources, and require some level of coordination between nodes to split up work, are not good candidates for scaling horizontally, and would likely produce some data duplication. These are plugins such as the JDBC input plugin, which has no automatic way to split queries across Logstash instances, or the S3 input, which has no way to split which buckets to read across Logstash instances. +** Plugins that retrieve data from external sources, where work is distributed externally to Logstash, but may impose their own limits. These are plugins like the Kafka input, or Azure event hubs, where the parallelism is limited by the number of partitions vs the number of consumers. 
In cases like this, extra Logstash Pods may be idle if the number of consumer threads multiplied by the number of Pods is greater than the number of partitions. +* Plugins that require events to be received in order. +** Certain plugins, such as the aggregate filter, expect events to be received in strict order to run without error or data loss. Any plugin that requires the number of pipeline workers to be `1` will also have issues when horizontal scaling is used. + If the pipeline does not contain any such plugin, the number of Logstash instances can be increased by setting the `count` property in the Logstash resource: [source,yaml,subs="attributes,+macros,callouts"] ---- From c3b9007ee52693d22cad3c34c68408bc23d04056 Mon Sep 17 00:00:00 2001 From: Rob Bavey Date: Fri, 28 Apr 2023 12:08:50 -0400 Subject: [PATCH 26/26] Missed capitalization --- config/recipes/logstash/README.asciidoc | 2 +- docs/orchestrating-elastic-stack-applications/logstash.asciidoc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/config/recipes/logstash/README.asciidoc b/config/recipes/logstash/README.asciidoc index 63ef92a723..791d28b08a 100644 --- a/config/recipes/logstash/README.asciidoc +++ b/config/recipes/logstash/README.asciidoc @@ -14,7 +14,7 @@ These recipes demonstrate how to run Logstash, Elasticsearch, Kibana and Filebea Deploys Logstash with the pipeline defined inline in the CRD. -===== Pipeline as secret - `logstash-pipeline-as-secret.yaml` +===== Pipeline as Secret - `logstash-pipeline-as-secret.yaml` Deploys Logstash with the pipeline defined in a Secret and referred to via `pipelinesRef`. diff --git a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc index d63442f66a..c223e90738 100644 --- a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc +++ b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc @@ -446,7 +446,7 @@ kubectl apply -f {logstash_recipes}/logstash-eck.yaml Deploys Logstash with a single pipeline defined in the CRD -=== Single Pipeline defined in secret +=== Single Pipeline defined in Secret [source,sh,subs="attributes"] ----