Introduce the Logstash operator for ECK #6732

Merged · 16 commits · Apr 28, 2023
25 changes: 17 additions & 8 deletions cmd/manager/main.go
@@ -16,6 +16,9 @@ import (
     "strings"
     "time"

+    logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1"
+    "github.com/elastic/cloud-on-k8s/v2/pkg/controller/logstash"
+
     "github.com/go-logr/logr"
     "github.com/spf13/cobra"
     "github.com/spf13/viper"
@@ -847,6 +850,7 @@ func registerControllers(mgr manager.Manager, params operator.Parameters, access
         {name: "Agent", registerFunc: agent.Add},
         {name: "Maps", registerFunc: maps.Add},
         {name: "StackConfigPolicy", registerFunc: stackconfigpolicy.Add},
+        {name: "Logstash", registerFunc: logstash.Add},
     }

     for _, c := range controllers {
@@ -872,9 +876,11 @@ func registerControllers(mgr manager.Manager, params operator.Parameters, access
         {name: "AGENT-KB", registerFunc: associationctl.AddAgentKibana},
         {name: "AGENT-FS", registerFunc: associationctl.AddAgentFleetServer},
         {name: "EMS-ES", registerFunc: associationctl.AddMapsES},
+        {name: "LOGSTASH-ES", registerFunc: associationctl.AddLogstashES},
         {name: "ES-MONITORING", registerFunc: associationctl.AddEsMonitoring},
         {name: "KB-MONITORING", registerFunc: associationctl.AddKbMonitoring},
         {name: "BEAT-MONITORING", registerFunc: associationctl.AddBeatMonitoring},
+        {name: "LOGSTASH-MONITORING", registerFunc: associationctl.AddLogstashMonitoring},
     }

     for _, c := range assocControllers {
@@ -913,6 +919,7 @@ func garbageCollectUsers(ctx context.Context, cfg *rest.Config, managedNamespace
         For(&beatv1beta1.BeatList{}, associationctl.BeatAssociationLabelNamespace, associationctl.BeatAssociationLabelName).
         For(&agentv1alpha1.AgentList{}, associationctl.AgentAssociationLabelNamespace, associationctl.AgentAssociationLabelName).
         For(&emsv1alpha1.ElasticMapsServerList{}, associationctl.MapsESAssociationLabelNamespace, associationctl.MapsESAssociationLabelName).
+        For(&logstashv1alpha1.LogstashList{}, associationctl.LogstashAssociationLabelNamespace, associationctl.LogstashAssociationLabelName).
         DoGarbageCollection(ctx)
     if err != nil {
         return fmt.Errorf("user garbage collector failed: %w", err)
@@ -925,14 +932,15 @@ func garbageCollectSoftOwnedSecrets(ctx context.Context, k8sClient k8s.Client) {
     defer span.End()

     if err := reconciler.GarbageCollectAllSoftOwnedOrphanSecrets(ctx, k8sClient, map[string]client.Object{
-        esv1.Kind:            &esv1.Elasticsearch{},
-        apmv1.Kind:           &apmv1.ApmServer{},
-        kbv1.Kind:            &kbv1.Kibana{},
-        entv1.Kind:           &entv1.EnterpriseSearch{},
-        beatv1beta1.Kind:     &beatv1beta1.Beat{},
-        agentv1alpha1.Kind:   &agentv1alpha1.Agent{},
-        emsv1alpha1.Kind:     &emsv1alpha1.ElasticMapsServer{},
-        policyv1alpha1.Kind:  &policyv1alpha1.StackConfigPolicy{},
+        esv1.Kind:             &esv1.Elasticsearch{},
+        apmv1.Kind:            &apmv1.ApmServer{},
+        kbv1.Kind:             &kbv1.Kibana{},
+        entv1.Kind:            &entv1.EnterpriseSearch{},
+        beatv1beta1.Kind:      &beatv1beta1.Beat{},
+        agentv1alpha1.Kind:    &agentv1alpha1.Agent{},
+        emsv1alpha1.Kind:      &emsv1alpha1.ElasticMapsServer{},
+        policyv1alpha1.Kind:   &policyv1alpha1.StackConfigPolicy{},
+        logstashv1alpha1.Kind: &logstashv1alpha1.Logstash{},
     }); err != nil {
         log.Error(err, "Orphan secrets garbage collection failed, will be attempted again at next operator restart.")
         return
@@ -973,6 +981,7 @@ func setupWebhook(
         &kbv1.Kibana{},
         &kbv1beta1.Kibana{},
         &emsv1alpha1.ElasticMapsServer{},
+        &logstashv1alpha1.Logstash{},
     }
     for _, obj := range webhookObjects {
         if err := commonwebhook.SetupValidatingWebhookWithConfig(&commonwebhook.Config{
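All registerFunc entries share one signature, so logstash.Add hooks into the existing registration loop unchanged. Below is a minimal sketch of the shape such an Add function could take; the type name ReconcileLogstash and the pkg/operator import path are assumptions, and the actual pkg/controller/logstash implementation in this PR may differ.

// Hypothetical sketch; the real controller in pkg/controller/logstash may differ.
package logstash

import (
    "context"

    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/manager"

    logstashv1alpha1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/logstash/v1alpha1"
    "github.com/elastic/cloud-on-k8s/v2/pkg/operator" // assumed location of operator.Parameters
)

// ReconcileLogstash is a placeholder name for illustration only.
type ReconcileLogstash struct {
    client.Client
    Params operator.Parameters
}

func (r *ReconcileLogstash) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    var ls logstashv1alpha1.Logstash
    if err := r.Get(ctx, req.NamespacedName, &ls); err != nil {
        // The resource may have been deleted between the watch event and this call.
        return ctrl.Result{}, client.IgnoreNotFound(err)
    }
    // ... reconcile the config Secret, StatefulSet and Services here ...
    return ctrl.Result{}, nil
}

// Add matches the registerFunc signature used in registerControllers above.
func Add(mgr manager.Manager, params operator.Parameters) error {
    return ctrl.NewControllerManagedBy(mgr).
        For(&logstashv1alpha1.Logstash{}).
        Complete(&ReconcileLogstash{Client: mgr.GetClient(), Params: params})
}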
785 changes: 785 additions & 0 deletions config/crds/v1/all-crds.yaml

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions config/crds/v1/bases/kustomization.yaml
@@ -8,3 +8,4 @@ resources:
 - agent.k8s.elastic.co_agents.yaml
 - maps.k8s.elastic.co_elasticmapsservers.yaml
 - stackconfigpolicy.k8s.elastic.co_stackconfigpolicies.yaml
+- logstash.k8s.elastic.co_logstashes.yaml
8,196 changes: 8,196 additions & 0 deletions config/crds/v1/bases/logstash.k8s.elastic.co_logstashes.yaml

Large diffs are not rendered by default.

8 changes: 8 additions & 0 deletions config/crds/v1/patches/kustomization.yaml
@@ -70,4 +70,12 @@ patchesJson6902:
     kind: CustomResourceDefinition
     name: elasticmapsservers.maps.k8s.elastic.co
   path: maps-patches.yaml
+# custom patches for Logstash
+- target:
+    group: apiextensions.k8s.io
+    version: v1
+    kind: CustomResourceDefinition
+    name: logstashes.logstash.k8s.elastic.co
+  path: logstash-patches.yaml
+
7 changes: 7 additions & 0 deletions config/crds/v1/patches/logstash-patches.yaml
@@ -0,0 +1,7 @@
# Using `kubectl apply` stores the complete CRD file as an annotation,
# which may be too big for the annotations size limit.
# One way to mitigate this problem is to remove the (huge) podTemplate properties from the CRD.
# It also avoids embedding any k8s-version-specific field in the Pod schema
# that might not match the user's k8s version.
- op: remove
path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/podTemplate/properties
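The net effect on the generated CRD, schematically: kubectl apply records the whole object in the kubectl.kubernetes.io/last-applied-configuration annotation, and a CRD carrying a fully inlined PodTemplateSpec schema can push that past the 262144-byte annotation size limit. The before/after below is illustrative and assumes the base schema marks podTemplate with x-kubernetes-preserve-unknown-fields, as other ECK CRDs do:

# Before the patch (schematic): podTemplate carries the full generated PodTemplateSpec schema.
podTemplate:
  type: object
  properties:
    metadata: {...}   # thousands of generated lines
    spec: {...}
  x-kubernetes-preserve-unknown-fields: true

# After the patch: the properties node is removed; the field stays a freeform object.
podTemplate:
  type: object
  x-kubernetes-preserve-unknown-fields: true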
13 changes: 13 additions & 0 deletions config/e2e/rbac.yaml
@@ -316,6 +316,19 @@ rules:
   - update
   - patch
   - delete
+- apiGroups:
+  - logstash.k8s.elastic.co
+  resources:
+  - logstashes
+  - logstashes/status
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
 - apiGroups:
   - storage.k8s.io
   resources:
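To sanity-check the new rule, kubectl auth can-i can impersonate the operator's service account; the account and namespace below are placeholders for whatever the e2e setup actually uses:

kubectl auth can-i create logstashes.logstash.k8s.elastic.co \
  --as=system:serviceaccount:e2e:elastic-operator
# expected: yes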
18 changes: 18 additions & 0 deletions config/samples/logstash/logstash.yaml
@@ -0,0 +1,18 @@
apiVersion: logstash.k8s.elastic.co/v1alpha1
kind: Logstash
metadata:
  name: logstash-sample
spec:
  count: 3
  version: 8.6.1
  config:
    log.level: info
    api.http.host: "0.0.0.0"
    queue.type: memory
  pipelines:
  - pipeline.id: main
    config.string: input { exec { command => 'uptime' interval => 10 } } output { stdout{} }
  podTemplate:
    spec:
      containers:
      - name: logstash
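A quick way to exercise this sample, assuming the operator and CRDs from this PR are installed; the label selector mirrors the convention of other ECK resources and is an assumption:

kubectl apply -f config/samples/logstash/logstash.yaml
kubectl get logstash logstash-sample                      # watch the status fill in
kubectl logs -f -l logstash.k8s.elastic.co/name=logstash-sample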
36 changes: 36 additions & 0 deletions config/samples/logstash/logstash_es.yaml
@@ -0,0 +1,36 @@
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: elasticsearch-sample
spec:
  version: 8.7.0
  nodeSets:
  - name: default
    count: 2
    config:
      node.store.allow_mmap: false
---
apiVersion: logstash.k8s.elastic.co/v1alpha1
kind: Logstash
metadata:
  name: logstash-sample
spec:
  count: 1
  version: 8.7.0
  elasticsearchRefs:
  - clusterName: production
    name: elasticsearch-sample
  pipelines:
  - pipeline.id: main
    config.string: |
      input { exec { command => 'uptime' interval => 10 } }
      output {
        elasticsearch {
          hosts => [ "${PRODUCTION_ES_HOSTS}" ]
          ssl => true
          cacert => "${PRODUCTION_ES_SSL_CERTIFICATE_AUTHORITY}"
          user => "${PRODUCTION_ES_USER}"
          password => "${PRODUCTION_ES_PASSWORD}"
        }
      }
---
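The pipeline above references ${PRODUCTION_ES_HOSTS}, ${PRODUCTION_ES_USER}, ${PRODUCTION_ES_PASSWORD} and ${PRODUCTION_ES_SSL_CERTIFICATE_AUTHORITY}: the variable prefix is the clusterName from elasticsearchRefs, uppercased. By that convention (inferred from this sample), a hypothetical second ref would expose its own set of variables:

  elasticsearchRefs:
  - clusterName: production
    name: elasticsearch-sample
  - clusterName: qa            # hypothetical second cluster
    name: elasticsearch-qa     # would yield ${QA_ES_HOSTS}, ${QA_ES_USER}, ${QA_ES_PASSWORD}, ...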
46 changes: 46 additions & 0 deletions config/samples/logstash/logstash_stackmonitor.yaml
@@ -0,0 +1,46 @@
---
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: monitoring
spec:
  version: 8.6.1
  nodeSets:
  - name: default
    count: 1
    config:
      node.store.allow_mmap: false
---
apiVersion: logstash.k8s.elastic.co/v1alpha1
kind: Logstash
metadata:
  name: logstash-sample
spec:
  count: 1
  version: 8.7.0
  config:
    log.level: info
    api.http.host: "0.0.0.0"
    queue.type: memory
  podTemplate:
    spec:
      containers:
      - name: logstash
  monitoring:
    metrics:
      elasticsearchRefs:
      - name: monitoring
    logs:
      elasticsearchRefs:
      - name: monitoring
---
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
  name: kibana-sample
spec:
  version: 8.6.1
  elasticsearchRef:
    name: monitoring
  count: 1
---
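ECK implements stack monitoring by injecting Beats sidecars into the monitored workload's Pods; assuming the Logstash operator follows the same pattern, the extra containers are easy to confirm (the label selector is an assumption):

kubectl get pod -l logstash.k8s.elastic.co/name=logstash-sample \
  -o jsonpath='{.items[0].spec.containers[*].name}'
# expected, roughly: logstash metricbeat filebeat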
51 changes: 51 additions & 0 deletions config/samples/logstash/logstash_svc.yaml
@@ -0,0 +1,51 @@
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: elasticsearch-sample
spec:
  version: 8.6.1
  nodeSets:
  - name: default
    count: 3
    config:
      node.store.allow_mmap: false
---
apiVersion: logstash.k8s.elastic.co/v1alpha1
kind: Logstash
metadata:
  name: logstash-sample
spec:
  count: 2
  version: 8.6.1
  config:
    log.level: info
    api.http.host: "0.0.0.0"
    api.http.port: 9601
    queue.type: memory
  pipelines:
  - pipeline.id: main
    pipeline.workers: 2
    config.string: "input { beats { port => 5044 }} output { stdout {}}"
  services:
  - name: api
    service:
      spec:
        type: ClusterIP
        ports:
        - port: 9601
          name: "api"
          protocol: TCP
          targetPort: 9601
  - name: beats
    service:
      spec:
        type: ClusterIP
        ports:
        - port: 5044
          name: "filebeat"
          protocol: TCP
          targetPort: 5044
        - port: 5045
          name: "winlogbeat"
          protocol: TCP
          targetPort: 5045
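With the beats service in place, a Beat in the same cluster can reach Logstash by service DNS name. A Filebeat output sketch follows; the logstash-sample-ls-beats name assumes ECK's <resource-name>-ls-<service-name> convention, so verify the actual name with kubectl get svc:

output.logstash:
  hosts: ["logstash-sample-ls-beats.default.svc:5044"]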
22 changes: 22 additions & 0 deletions config/webhook/manifests.yaml
@@ -202,6 +202,28 @@ webhooks:
     resources:
     - kibanas
   sideEffects: None
+- admissionReviewVersions:
+  - v1
+  - v1beta1
+  clientConfig:
+    service:
+      name: webhook-service
+      namespace: system
+      path: /validate-logstash-k8s-elastic-co-v1alpha1-logstash
+  failurePolicy: Ignore
+  matchPolicy: Exact
+  name: elastic-logstash-validation-v1alpha1.k8s.elastic.co
+  rules:
+  - apiGroups:
+    - logstash.k8s.elastic.co
+    apiVersions:
+    - v1alpha1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - logstashes
+  sideEffects: None
 - admissionReviewVersions:
   - v1
   - v1beta1
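Because failurePolicy: Ignore lets objects through whenever the webhook is unreachable, a useful smoke test while it is up is to submit an intentionally invalid Logstash and expect a rejection from the /validate-logstash-k8s-elastic-co-v1alpha1-logstash path; the invalid value below is illustrative, and the exact rules live in the v1alpha1 validation code:

kubectl apply -f - <<'EOF'
apiVersion: logstash.k8s.elastic.co/v1alpha1
kind: Logstash
metadata:
  name: invalid-sample
spec:
  count: 1
  version: not-a-version   # assumed to trip version validation
EOF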