diff --git a/config/recipes/logstash/README.asciidoc b/config/recipes/logstash/README.asciidoc index 65775b5ad5..cff266ba98 100644 --- a/config/recipes/logstash/README.asciidoc +++ b/config/recipes/logstash/README.asciidoc @@ -8,7 +8,31 @@ endif::[] = Using Logstash with ECK -This recipe demonstrates how to run the link:https://www.elastic.co/guide/en/logstash/current/advanced-pipeline.html[Logstash log parsing example] on Kubernetes with Elasticsearch, Kibana and Filebeat deployed via ECK. +These recipes demonstrate how to run Logstash, Elasticsearch, Kibana and Filebeat deployed via ECK, using the link:https://www.elastic.co/guide/en/logstash/current/advanced-pipeline.html[Logstash log parsing example] as a starting point. +===== Inline Pipeline usage - `logstash-eck.yaml` + +Deploys Logstash with the pipeline defined inline in the CRD. + +===== Pipeline as Secret - `logstash-pipeline-as-secret.yaml` + +Deploys Logstash with the pipeline defined in a Secret and referred to via `pipelinesRef`. + +===== Pipeline as mounted volume - `logstash-pipeline-as-volume.yaml` + +Deploys Logstash with the pipeline details defined in the CRD, and the pipeline itself mounted as a volume. + +===== Logstash with multiple pipelines and multiple elasticsearchRefs - `logstash-multi.yaml` + +Deploys Logstash with multiple pipelines, each of which sends to a separate Elasticsearch cluster. + +===== Logstash with Stack Monitoring - `logstash-monitored.yaml` + +Deploys Logstash and a dedicated Elasticsearch and Kibana monitoring cluster, and sends Logstash monitoring data to that cluster. + +===== Logstash and Elasticsearch with custom role - `logstash-es-role.yaml` + +Deploys Logstash and Elasticsearch and a Secret to customize Elasticsearch role `eck_logstash_user_role`. The role is essential for Logstash to have privileges to write document to custom index "my-index". + +CAUTION: These recipes use the `node.store.allow_mmap: false` configuration value to avoid configuring memory mapping settings on the underlying host. This could have a significant performance impact on your Elasticsearch cluster and should not be used in production without careful consideration. See https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-virtual-memory.html for more information. -CAUTION: This recipe uses the `node.store.allow_mmap: false` configuration value to avoid configuring memory mapping settings on the underlying host. This could have a significant performance impact on your Elasticsearch cluster and should not be used in production without careful consideration. See https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-virtual-memory.html for more information. diff --git a/config/recipes/logstash/logstash-eck.yaml b/config/recipes/logstash/logstash-eck.yaml new file mode 100644 index 0000000000..cf47e63d83 --- /dev/null +++ b/config/recipes/logstash/logstash-eck.yaml @@ -0,0 +1,108 @@ +--- +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: elasticsearch +spec: + version: 8.7.0 + nodeSets: + - name: default + count: 3 + config: + # This setting has performance implications. See the README for more details. 
+ node.store.allow_mmap: false +--- +apiVersion: kibana.k8s.elastic.co/v1 +kind: Kibana +metadata: + name: kibana +spec: + version: 8.7.0 + count: 1 + elasticsearchRef: + name: elasticsearch +--- +apiVersion: beat.k8s.elastic.co/v1beta1 +kind: Beat +metadata: + name: filebeat +spec: + type: filebeat + version: 8.7.0 + config: + filebeat.inputs: + - type: log + paths: + - /data/logstash-tutorial.log + output.logstash: + hosts: ["logstash-ls-beats:5044"] + deployment: + podTemplate: + spec: + automountServiceAccountToken: true + initContainers: + - name: download-tutorial + image: curlimages/curl + command: ["/bin/sh"] + args: ["-c", "curl -L https://download.elastic.co/demos/logstash/gettingstarted/logstash-tutorial.log.gz | gunzip -c > /data/logstash-tutorial.log"] + volumeMounts: + - name: data + mountPath: /data + containers: + - name: filebeat + volumeMounts: + - name: data + mountPath: /data + - name: beat-data + mountPath: /usr/share/filebeat/data + volumes: + - name: data + emptydir: {} + - name: beat-data + emptydir: {} +--- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: logstash +spec: + count: 1 + version: 8.7.0 + elasticsearchRefs: + - clusterName: eck + name: elasticsearch + pipelines: + - pipeline.id: main + config.string: | + input { + beats { + port => 5044 + } + } + filter { + grok { + match => { "message" => "%{HTTPD_COMMONLOG}"} + } + geoip { + source => "[source][address]" + target => "[source]" + } + } + output { + elasticsearch { + hosts => [ "${ECK_ES_HOSTS}" ] + user => "${ECK_ES_USER}" + password => "${ECK_ES_PASSWORD}" + cacert => "${ECK_ES_SSL_CERTIFICATE_AUTHORITY}" + } + } + services: + - name: beats + service: + spec: + type: ClusterIP + ports: + - port: 5044 + name: "filebeat" + protocol: TCP + targetPort: 5044 diff --git a/config/recipes/logstash/logstash-es-role.yaml b/config/recipes/logstash/logstash-es-role.yaml new file mode 100644 index 0000000000..da76b7935f --- /dev/null +++ b/config/recipes/logstash/logstash-es-role.yaml @@ -0,0 +1,55 @@ +kind: Secret +apiVersion: v1 +metadata: + name: my-roles-secret +stringData: + roles.yml: |- + eck_logstash_user_role: + cluster: [ "monitor", "manage_ilm", "read_ilm", "manage_logstash_pipelines", "manage_index_templates", "cluster:admin/ingest/pipeline/get"] + indices: + - names: [ "my-index", "logstash", "logstash-*", "ecs-logstash", "ecs-logstash-*", "logs-*", "metrics-*", "synthetics-*", "traces-*" ] + privileges: [ "manage", "write", "create_index", "read", "view_index_metadata" ] +--- +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: elasticsearch +spec: + version: 8.7.0 + auth: + roles: + - secretName: my-roles-secret + nodeSets: + - name: default + count: 3 + config: + node.store.allow_mmap: false +--- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: logstash +spec: + count: 1 + version: 8.7.0 + elasticsearchRefs: + - name: elasticsearch + clusterName: eck + pipelines: + - pipeline.id: main + config.string: | + input { exec { command => "uptime" interval => 10 } } + output { + elasticsearch { + hosts => [ "${ECK_ES_HOSTS}" ] + ssl => true + cacert => "${ECK_ES_SSL_CERTIFICATE_AUTHORITY}" + user => "${ECK_ES_USER}" + password => "${ECK_ES_PASSWORD}" + index => "my-index" + data_stream => false + ilm_enabled => false + manage_template => false + } + } +--- \ No newline at end of file diff --git a/config/recipes/logstash/logstash-monitored.yaml b/config/recipes/logstash/logstash-monitored.yaml new file mode 
100644 index 0000000000..93243f3308 --- /dev/null +++ b/config/recipes/logstash/logstash-monitored.yaml @@ -0,0 +1,134 @@ +--- +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: elasticsearch +spec: + version: 8.7.0 + nodeSets: + - name: default + count: 3 + config: + # This setting has performance implications. See the README for more details. + node.store.allow_mmap: false +--- +apiVersion: kibana.k8s.elastic.co/v1 +kind: Kibana +metadata: + name: kibana +spec: + version: 8.7.0 + count: 1 + elasticsearchRef: + name: elasticsearch +--- +apiVersion: beat.k8s.elastic.co/v1beta1 +kind: Beat +metadata: + name: filebeat +spec: + type: filebeat + version: 8.7.0 + config: + filebeat.inputs: + - type: log + paths: + - /data/logstash-tutorial.log + output.logstash: + hosts: ["logstash-ls-beats:5044"] + deployment: + podTemplate: + spec: + automountServiceAccountToken: true + initContainers: + - name: download-tutorial + image: curlimages/curl + command: ["/bin/sh"] + args: ["-c", "curl -L https://download.elastic.co/demos/logstash/gettingstarted/logstash-tutorial.log.gz | gunzip -c > /data/logstash-tutorial.log"] + volumeMounts: + - name: data + mountPath: /data + containers: + - name: filebeat + volumeMounts: + - name: data + mountPath: /data + - name: beat-data + mountPath: /usr/share/filebeat/data + volumes: + - name: data + emptydir: {} + - name: beat-data + emptydir: {} +--- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: logstash +spec: + count: 1 + version: 8.7.0 + elasticsearchRefs: + - clusterName: eck + name: elasticsearch + monitoring: + metrics: + elasticsearchRefs: + - name: elasticsearch-monitoring + pipelines: + - pipeline.id: main + config.string: | + input { + beats { + port => 5044 + } + } + filter { + grok { + match => { "message" => "%{HTTPD_COMMONLOG}"} + } + geoip { + source => "[source][address]" + target => "[source]" + } + } + output { + elasticsearch { + hosts => [ "${ECK_ES_HOSTS}" ] + user => "${ECK_ES_USER}" + password => "${ECK_ES_PASSWORD}" + cacert => "${ECK_ES_SSL_CERTIFICATE_AUTHORITY}" + } + } + services: + - name: beats + service: + spec: + type: ClusterIP + ports: + - port: 5044 + name: "filebeat" + protocol: TCP + targetPort: 5044 +--- +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: elasticsearch-monitoring +spec: + version: 8.7.0 + nodeSets: + - name: default + count: 3 + config: + node.store.allow_mmap: false +--- +apiVersion: kibana.k8s.elastic.co/v1 +kind: Kibana +metadata: + name: kibana-monitoring +spec: + version: 8.7.0 + count: 1 + elasticsearchRef: + name: elasticsearch-monitoring diff --git a/config/recipes/logstash/logstash-multi.yaml b/config/recipes/logstash/logstash-multi.yaml new file mode 100644 index 0000000000..453da592dc --- /dev/null +++ b/config/recipes/logstash/logstash-multi.yaml @@ -0,0 +1,153 @@ +--- +kind: Namespace +apiVersion: v1 +metadata: + name: qa + labels: + name: qa +--- +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: qa + namespace: qa +spec: + version: 8.7.0 + nodeSets: + - name: default + count: 3 + config: + # This setting has performance implications. See the README for more details. + node.store.allow_mmap: false +--- +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: production +spec: + version: 8.7.0 + nodeSets: + - name: default + count: 3 + config: + # This setting has performance implications. See the README for more details. 
+ node.store.allow_mmap: false +--- +apiVersion: beat.k8s.elastic.co/v1beta1 +kind: Beat +metadata: + name: filebeat +spec: + type: filebeat + version: 8.7.0 + config: + filebeat.inputs: + - type: log + paths: + - /data/logstash-tutorial.log + output.logstash: + hosts: ["logstash-ls-beats:5044"] + deployment: + podTemplate: + spec: + automountServiceAccountToken: true + initContainers: + - name: download-tutorial + image: curlimages/curl + command: ["/bin/sh"] + args: ["-c", "curl -L https://download.elastic.co/demos/logstash/gettingstarted/logstash-tutorial.log.gz | gunzip -c > /data/logstash-tutorial.log"] + volumeMounts: + - name: data + mountPath: /data + containers: + - name: filebeat + volumeMounts: + - name: data + mountPath: /data + - name: beat-data + mountPath: /usr/share/filebeat/data + volumes: + - name: data + emptydir: {} + - name: beat-data + emptydir: {} +--- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: logstash +spec: + count: 1 + version: 8.7.0 + elasticsearchRefs: + - clusterName: prod-es + name: production + - clusterName: qa-es + name: qa + namespace: qa + pipelines: + - pipeline.id: main + config.string: | + input { + beats { + port => 5044 + } + } + filter { + grok { + match => { "message" => "%{HTTPD_COMMONLOG}"} + } + geoip { + source => "[source][address]" + target => "[source]" + } + } + output { + pipeline { + send_to => 'prod' + } + pipeline { + send_to => 'qa' + } + } + - pipeline.id: production + config.string: | + input { + pipeline { + address => 'prod' + } + } + output { + elasticsearch { + hosts => [ "${PROD_ES_ES_HOSTS}" ] + user => "${PROD_ES_ES_USER}" + password => "${PROD_ES_ES_PASSWORD}" + cacert => "${PROD_ES_ES_SSL_CERTIFICATE_AUTHORITY}" + } + } + - pipeline.id: qa + config.string: | + input { + pipeline { + address => 'qa' + } + } + output { + elasticsearch { + hosts => [ "${QA_ES_ES_HOSTS}" ] + user => "${QA_ES_ES_USER}" + password => "${QA_ES_ES_PASSWORD}" + cacert => "${QA_ES_ES_SSL_CERTIFICATE_AUTHORITY}" + } + } + services: + - name: beats + service: + spec: + type: ClusterIP + ports: + - port: 5044 + name: "filebeat" + protocol: TCP + targetPort: 5044 +--- diff --git a/config/recipes/logstash/logstash-pipeline-as-secret.yaml b/config/recipes/logstash/logstash-pipeline-as-secret.yaml new file mode 100644 index 0000000000..a05f5ac425 --- /dev/null +++ b/config/recipes/logstash/logstash-pipeline-as-secret.yaml @@ -0,0 +1,116 @@ +--- +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: elasticsearch +spec: + version: 8.7.0 + nodeSets: + - name: default + count: 3 + config: + # This setting has performance implications. See the README for more details. 
+ node.store.allow_mmap: false +--- +apiVersion: kibana.k8s.elastic.co/v1 +kind: Kibana +metadata: + name: kibana +spec: + version: 8.7.0 + count: 1 + elasticsearchRef: + name: elasticsearch +--- +apiVersion: beat.k8s.elastic.co/v1beta1 +kind: Beat +metadata: + name: filebeat +spec: + type: filebeat + version: 8.7.0 + config: + filebeat.inputs: + - type: log + paths: + - /data/logstash-tutorial.log + output.logstash: + hosts: ["logstash-ls-beats:5044"] + deployment: + podTemplate: + spec: + automountServiceAccountToken: true + initContainers: + - name: download-tutorial + image: curlimages/curl + command: ["/bin/sh"] + args: ["-c", "curl -L https://download.elastic.co/demos/logstash/gettingstarted/logstash-tutorial.log.gz | gunzip -c > /data/logstash-tutorial.log"] + volumeMounts: + - name: data + mountPath: /data + containers: + - name: filebeat + volumeMounts: + - name: data + mountPath: /data + - name: beat-data + mountPath: /usr/share/filebeat/data + volumes: + - name: data + emptydir: {} + - name: beat-data + emptydir: {} +--- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: logstash +spec: + count: 1 + version: 8.7.0 + elasticsearchRefs: + - clusterName: eck + name: elasticsearch + pipelinesRef: + secretName: logstash-pipeline + services: + - name: beats + service: + spec: + type: ClusterIP + ports: + - port: 5044 + name: "filebeat" + protocol: TCP + targetPort: 5044 +--- +apiVersion: v1 +kind: Secret +metadata: + name: logstash-pipeline +stringData: + pipelines.yml: |- + - pipeline.id: main + config.string: | + input { + beats { + port => 5044 + } + } + filter { + grok { + match => { "message" => "%{HTTPD_COMMONLOG}"} + } + geoip { + source => "[source][address]" + target => "[source]" + } + } + output { + elasticsearch { + hosts => [ "${ECK_ES_HOSTS}" ] + user => "${ECK_ES_USER}" + password => "${ECK_ES_PASSWORD}" + cacert => "${ECK_ES_SSL_CERTIFICATE_AUTHORITY}" + } + } diff --git a/config/recipes/logstash/logstash-pipeline-as-volume.yaml b/config/recipes/logstash/logstash-pipeline-as-volume.yaml new file mode 100644 index 0000000000..f59e0a051f --- /dev/null +++ b/config/recipes/logstash/logstash-pipeline-as-volume.yaml @@ -0,0 +1,129 @@ +--- +apiVersion: elasticsearch.k8s.elastic.co/v1 +kind: Elasticsearch +metadata: + name: elasticsearch +spec: + version: 8.7.0 + nodeSets: + - name: default + count: 3 + config: + # This setting has performance implications. See the README for more details. 
+ node.store.allow_mmap: false +--- +apiVersion: kibana.k8s.elastic.co/v1 +kind: Kibana +metadata: + name: kibana +spec: + version: 8.7.0 + count: 1 + elasticsearchRef: + name: elasticsearch +--- +apiVersion: beat.k8s.elastic.co/v1beta1 +kind: Beat +metadata: + name: filebeat +spec: + type: filebeat + version: 8.7.0 + config: + filebeat.inputs: + - type: log + paths: + - /data/logstash-tutorial.log + output.logstash: + hosts: ["logstash-ls-beats:5044"] + deployment: + podTemplate: + spec: + automountServiceAccountToken: true + initContainers: + - name: download-tutorial + image: curlimages/curl + command: ["/bin/sh"] + args: ["-c", "curl -L https://download.elastic.co/demos/logstash/gettingstarted/logstash-tutorial.log.gz | gunzip -c > /data/logstash-tutorial.log"] + volumeMounts: + - name: data + mountPath: /data + containers: + - name: filebeat + volumeMounts: + - name: data + mountPath: /data + - name: beat-data + mountPath: /usr/share/filebeat/data + volumes: + - name: data + emptydir: {} + - name: beat-data + emptydir: {} +--- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: logstash +spec: + count: 1 + version: 8.7.0 + elasticsearchRefs: + - clusterName: eck + name: elasticsearch + pipelines: + - pipeline.id: main + path.config: /usr/share/logstash/config/pipelines + services: + - name: beats + service: + spec: + type: ClusterIP + ports: + - port: 5044 + name: "filebeat" + protocol: TCP + targetPort: 5044 + podTemplate: + spec: + volumes: + - name: logstash-pipeline + secret: + secretName: logstash-pipeline + containers: + - name: logstash + volumeMounts: + - mountPath: /usr/share/logstash/config/pipelines + name: logstash-pipeline + readOnly: true +--- +apiVersion: v1 +kind: Secret +metadata: + name: logstash-pipeline +stringData: + input.conf: |- + input { + beats { + port => 5044 + } + } + filters.conf: |- + filter { + grok { + match => { "message" => "%{HTTPD_COMMONLOG}"} + } + geoip { + source => "[source][address]" + target => "[source]" + } + } + output.conf: |- + output { + elasticsearch { + hosts => [ "${ECK_ES_HOSTS}" ] + user => "${ECK_ES_USER}" + password => "${ECK_ES_PASSWORD}" + cacert => "${ECK_ES_SSL_CERTIFICATE_AUTHORITY}" + } + } diff --git a/config/recipes/logstash/logstash.yaml b/config/recipes/logstash/logstash.yaml deleted file mode 100644 index 8c41800efc..0000000000 --- a/config/recipes/logstash/logstash.yaml +++ /dev/null @@ -1,189 +0,0 @@ ---- -apiVersion: elasticsearch.k8s.elastic.co/v1 -kind: Elasticsearch -metadata: - name: elasticsearch - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: elasticsearch -spec: - version: 8.7.0 - nodeSets: - - name: default - count: 3 - config: - # This setting has performance implications. See the README for more details. 
- node.store.allow_mmap: false ---- -apiVersion: kibana.k8s.elastic.co/v1 -kind: Kibana -metadata: - name: kibana - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: kibana -spec: - version: 8.7.0 - count: 1 - elasticsearchRef: - name: elasticsearch ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: logstash-config - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: logstash -data: - logstash.yml: | - http.host: "0.0.0.0" - path.config: /usr/share/logstash/pipeline ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: logstash-pipeline - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: logstash -data: - logstash.conf: | - input { - beats { - port => 5044 - } - } - filter { - grok { - match => { "message" => "%{COMBINEDAPACHELOG}"} - } - geoip { - source => "clientip" - target => "clientgeo" - } - } - output { - elasticsearch { - hosts => [ "${ES_HOSTS}" ] - user => "${ES_USER}" - password => "${ES_PASSWORD}" - cacert => '/etc/logstash/certificates/ca.crt' - } - } ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: logstash - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: logstash -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: logstash - template: - metadata: - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: logstash - spec: - containers: - - name: logstash - image: docker.elastic.co/logstash/logstash:8.7.0 - ports: - - name: "tcp-beats" - containerPort: 5044 - env: - - name: ES_HOSTS - value: "https://elasticsearch-es-http.default.svc:9200" - - name: ES_USER - value: "elastic" - - name: ES_PASSWORD - valueFrom: - secretKeyRef: - name: elasticsearch-es-elastic-user - key: elastic - volumeMounts: - - name: config-volume - mountPath: /usr/share/logstash/config - - name: pipeline-volume - mountPath: /usr/share/logstash/pipeline - - name: ca-certs - mountPath: /etc/logstash/certificates - readOnly: true - volumes: - - name: config-volume - configMap: - name: logstash-config - - name: pipeline-volume - configMap: - name: logstash-pipeline - - name: ca-certs - secret: - secretName: elasticsearch-es-http-certs-public ---- -apiVersion: v1 -kind: Service -metadata: - name: logstash - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: logstash -spec: - ports: - - name: "tcp-beats" - port: 5044 - targetPort: 5044 - selector: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: logstash ---- -apiVersion: beat.k8s.elastic.co/v1beta1 -kind: Beat -metadata: - name: filebeat - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: filebeat -spec: - type: filebeat - version: 8.7.0 - config: - filebeat.inputs: - - type: log - paths: - - /data/logstash-tutorial.log - output.logstash: - hosts: ["logstash.default.svc:5044"] - deployment: - podTemplate: - metadata: - labels: - app.kubernetes.io/name: eck-logstash - app.kubernetes.io/component: filebeat - spec: - automountServiceAccountToken: true - initContainers: - - name: download-tutorial - image: curlimages/curl - command: ["/bin/sh"] - args: ["-c", "curl -L https://download.elastic.co/demos/logstash/gettingstarted/logstash-tutorial.log.gz | gunzip -c > /data/logstash-tutorial.log"] - volumeMounts: - - name: data - mountPath: /data - containers: - - name: filebeat - volumeMounts: - - name: data - mountPath: /data - - name: beat-data - mountPath: 
/usr/share/filebeat/data - volumes: - - name: data - emptydir: {} - - name: beat-data - emptydir: {} diff --git a/docs/advanced-topics/network-policies.asciidoc b/docs/advanced-topics/network-policies.asciidoc index 89f344c500..f83377e9ec 100644 --- a/docs/advanced-topics/network-policies.asciidoc +++ b/docs/advanced-topics/network-policies.asciidoc @@ -532,4 +532,49 @@ spec: podSelector: matchLabels: common.k8s.elastic.co/type: agent ----- \ No newline at end of file +---- + +[float] +[id="{p}-{page_id}-logstash-isolation"] +== Isolating {ls} + + +NOTE: {ls} may require additional access rules than those listed here, depending on plugin usage. + + +[cols="h,1"] +|=== +| Egress (outgoing) a| + +* TCP port {es_http_port} to {es} nodes in the namespace. +* UDP port {dns_port} for DNS lookup. + +|=== + + +[source,yaml,subs="attributes"] +---- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: eck-logstash + namespace: team-a +spec: + egress: + - ports: + - port: {es_http_port} + protocol: TCP + to: + - namespaceSelector: + matchLabels: + eck.k8s.elastic.co/tenant: team-a + podSelector: + matchLabels: + common.k8s.elastic.co/type: elasticsearch + - ports: + - port: {dns_port} + protocol: UDP + podSelector: + matchLabels: + common.k8s.elastic.co/type: logstash +---- diff --git a/docs/advanced-topics/stack-monitoring.asciidoc b/docs/advanced-topics/stack-monitoring.asciidoc index cded98a268..ffe600fe32 100644 --- a/docs/advanced-topics/stack-monitoring.asciidoc +++ b/docs/advanced-topics/stack-monitoring.asciidoc @@ -9,7 +9,7 @@ endif::[] = Stack Monitoring You can enable link:https://www.elastic.co/guide/en/elasticsearch/reference/current/monitor-elasticsearch-cluster.html[Stack Monitoring] -on Elasticsearch, Kibana and Beats to collect and ship their metrics and logs to a dedicated monitoring cluster. +on Elasticsearch, Kibana, Beats and Logstash to collect and ship their metrics and logs to a dedicated monitoring cluster. To enable Stack Monitoring, simply reference the monitoring Elasticsearch cluster in the `spec.monitoring` section of their specification. @@ -46,7 +46,7 @@ spec: version: {version} elasticsearchRef: name: monitored-sample - namespace: production <2> + namespace: production <1> monitoring: metrics: elasticsearchRefs: @@ -69,16 +69,30 @@ spec: metrics: elasticsearchRefs: - name: monitoring - namespace: observability <3> + namespace: observability <1> logs: elasticsearchRefs: - name: monitoring - namespace: observability <3> + namespace: observability <1> +--- +apiVersion: logstash.k8s.elastic.co/v1beta1 +kind: Logstash +metadata: + name: monitored-sample +spec: + version: {version} + monitoring: + metrics: + elasticsearchRefs: + - name: monitoring + namespace: observability <1> + logs: + elasticsearchRefs: + - name: monitoring + namespace: observability <1> ---- -<1> The use of `namespace` is optional if the monitoring Elasticsearch cluster and the monitored Elasticsearch cluster are running in the same namespace. -<2> The use of `namespace` is optional if the Elasticsearch cluster and the Kibana instance are running in the same namespace. -<3> The use of `namespace` is optional if the Elasticsearch cluster and the Beats instance are running in the same namespace. +<1> The use of `namespace` is optional if the monitoring Elasticsearch cluster and the monitored Elastic Stack resource are running in the same namespace. NOTE: You can configure an Elasticsearch cluster to monitor itself. 
@@ -86,6 +100,8 @@ NOTE: If Stack Monitoring is configured for a Beat, but the corresponding Elasti NOTE: If Logs Stack Monitoring is configured for a Beat, and custom container arguments (`podTemplate.spec.containers[].args`) include `-e`, which enables logging to stderr and disables log file output, this argument will be removed from the Pod to allow the Filebeat sidecar to consume the Beat's log files. +NOTE: Stack Monitoring for Logstash on ECK is only available for Logstash versions 8.7.0 and later. + IMPORTANT: The monitoring cluster must be managed by ECK in the same Kubernetes cluster as the monitored one. You can send metrics and logs to two different Elasticsearch monitoring clusters. diff --git a/docs/operating-eck/eck-permissions.asciidoc b/docs/operating-eck/eck-permissions.asciidoc index 23398e37d7..19242d0e69 100644 --- a/docs/operating-eck/eck-permissions.asciidoc +++ b/docs/operating-eck/eck-permissions.asciidoc @@ -111,5 +111,10 @@ Agent/finalizers ElasticMapsServer/status + ElasticMapsServer/finalizers |maps.k8s.elastic.co|no +|Logstash + +Logstash/status + +Logstash/finalizers +|logstashes.k8s.elastic.co|no + |=== diff --git a/docs/orchestrating-elastic-stack-applications/logstash.asciidoc b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc new file mode 100644 index 0000000000..dd47895830 --- /dev/null +++ b/docs/orchestrating-elastic-stack-applications/logstash.asciidoc @@ -0,0 +1,619 @@ +:page_id: logstash +:logstash_recipes: https://raw.githubusercontent.com/elastic/cloud-on-k8s/{eck_release_branch}/config/recipes/logstash +ifdef::env-github[] +**** +link:https://www.elastic.co/guide/en/cloud-on-k8s/master/k8s-{page_id}.html[View this document on the Elastic website] +**** +endif::[] +[id="{p}-{page_id}"] += Run Logstash on ECK + +experimental[] + +This section describes how to configure and deploy Logstash with ECK. + +* <<{p}-logstash-quickstart,Quickstart>> +* <<{p}-logstash-configuration,Configuration>> +** <<{p}-logstash-configuring-logstash,Configuring Logstash>> +** <<{p}-logstash-pipelines,Configuring Pipelines>> +** <<{p}-logstash-pipelines-es,Using Elasticsearch in Logstash Pipelines>> +** <<{p}-logstash-expose-services,Exposing Services>> +* <<{p}-logstash-configuration-examples,Configuration examples>> +* <<{p}-logstash-advanced-configuration,Advanced Configuration>> +** <<{p}-logstash-jvm-options,Setting JVM Options>> +** <<{p}-logstash-scaling-logstash,Scaling Logstash>> +* <<{p}-logstash-technical-preview-limitations,Technical Preview Limitations>> + + +NOTE: Running Logstash on ECK is compatible only with Logstash 8.7+. + + +[id="{p}-logstash-quickstart"] +== Quickstart + +experimental[] + +Add the following specification to create a minimal Logstash deployment that will listen to a Beats agent or Elastic Agent configured to send to Logstash on port 5044, create the service and write the output to an Elasticsearch cluster named `quickstart`, created in the link:k8s-quickstart.html[Elasticsearch quickstart]. 
+ +[source,yaml,subs="attributes,+macros,callouts"] +---- +cat $$<<$$EOF | kubectl apply -f - +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: quickstart +spec: + count: 1 + elasticsearchRefs: + - name: quickstart + clusterName: qs + version: {version} + pipelines: + - pipeline.id: main + config.string: | + input { + beats { + port => 5044 + } + } + output { + elasticsearch { + hosts => [ "${QS_ES_HOSTS}" ] + user => "${QS_ES_USER}" + password => "${QS_ES_PASSWORD}" + cacert => "${QS_ES_SSL_CERTIFICATE_AUTHORITY}" + } + } + services: + - name: beats + service: + spec: + type: NodePort + ports: + - port: 5044 + name: "filebeat" + protocol: TCP + targetPort: 5044 +EOF +---- + +Check <<{p}-logstash-configuration-examples>> for more ready-to-use manifests. + +. Check the status of Logstash ++ +[source,sh] +---- +kubectl get logstash +---- ++ +[source,sh,subs="attributes"] +---- +NAME AVAILABLE EXPECTED AGE VERSION +quickstart 3 3 4s {version} +---- + +. List all the Pods that belong to a given Logstash specification. ++ +[source,sh] +---- +kubectl get pods --selector='logstash.k8s.elastic.co/name=quickstart' +---- ++ +[source,sh] +---- +NAME READY STATUS RESTARTS AGE +quickstart-ls-0 1/1 Running 0 91s +---- + +. Access logs for a Pod. + +[source,sh] +---- +kubectl logs -f quickstart-ls-0 +---- + +[id="{p}-logstash-configuration"] +== Configuration + +experimental[] + +[id="{p}-logstash-upgrade-specification"] +=== Upgrade the Logstash specification + +You can upgrade the Logstash version or change settings by editing the YAML specification. ECK applies the changes by performing a rolling restart of Logstash Pods. + +[id="{p}-logstash-configuring-logstash"] +=== Logstash configuration + +Define the Logstash configuration (the ECK equivalent to `logstash.yml`) in the `spec.config` section: + +[source,yaml,subs="attributes,+macros,callouts"] +---- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: quickstart +spec: + version: {version} + count: 1 + elasticsearchRefs: + - name: quickstart + clusterName: qs + config: <1> + pipeline.workers: 4 + log.level: debug +---- +<1> Customize Logstash configuration using `logstash.yml` settings here + + +Alternatively, you can provide the configuration through a Secret specified in the `spec.configRef` section. 
The Secret must have a `logstash.yml` entry with these settings:
+[source,yaml,subs="attributes,+macros"]
+----
+apiVersion: logstash.k8s.elastic.co/v1alpha1
+kind: Logstash
+metadata:
+  name: quickstart
+spec:
+  version: {version}
+  count: 1
+  elasticsearchRefs:
+    - name: quickstart
+      clusterName: qs
+  configRef:
+    secretName: quickstart-config
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: quickstart-config
+stringData:
+  logstash.yml: |-
+    pipeline.workers: 4
+    log.level: debug
+----
+
+
+[id="{p}-logstash-pipelines"]
+=== Configuring Logstash pipelines
+
+Define Logstash pipelines in the `spec.pipelines` section (the ECK equivalent to `pipelines.yml`):
+
+[source,yaml,subs="attributes,+macros,callouts"]
+----
+apiVersion: logstash.k8s.elastic.co/v1alpha1
+kind: Logstash
+metadata:
+  name: quickstart
+spec:
+  version: {version}
+  count: 1
+  elasticsearchRefs:
+    - clusterName: qs
+      name: quickstart
+  pipelines:
+    - pipeline.id: main
+      config.string: |
+        input {
+          beats {
+            port => 5044
+          }
+        }
+        output {
+          elasticsearch {
+            hosts => [ "${QS_ES_HOSTS}" ]
+            user => "${QS_ES_USER}"
+            password => "${QS_ES_PASSWORD}"
+            cacert => "${QS_ES_SSL_CERTIFICATE_AUTHORITY}"
+          }
+        }
+----
+
+Alternatively, you can provide the pipeline configuration through a Secret specified in the `spec.pipelinesRef` element. The Secret must have a `pipelines.yml` entry with this configuration:
+[source,yaml,subs="attributes,+macros"]
+----
+apiVersion: logstash.k8s.elastic.co/v1alpha1
+kind: Logstash
+metadata:
+  name: quickstart
+spec:
+  version: {version}
+  count: 1
+  elasticsearchRefs:
+    - clusterName: qs
+      name: quickstart
+  pipelinesRef:
+    secretName: quickstart-pipeline
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: quickstart-pipeline
+stringData:
+  pipelines.yml: |-
+    - pipeline.id: main
+      config.string: |
+        input {
+          beats {
+            port => 5044
+          }
+        }
+        output {
+          elasticsearch {
+            hosts => [ "${QS_ES_HOSTS}" ]
+            user => "${QS_ES_USER}"
+            password => "${QS_ES_PASSWORD}"
+            cacert => "${QS_ES_SSL_CERTIFICATE_AUTHORITY}"
+          }
+        }
+----
+
+Logstash on ECK supports all options present in `pipelines.yml`, including settings to update the number of workers and the size of the batch that the pipeline will process. This also includes using `path.config` to point to volumes mounted on the Logstash container:
+
+[source,yaml,subs="attributes,+macros,callouts"]
+----
+apiVersion: logstash.k8s.elastic.co/v1alpha1
+kind: Logstash
+metadata:
+  name: quickstart
+spec:
+  version: {version}
+  count: 1
+  elasticsearchRefs:
+    - clusterName: qs
+      name: quickstart
+  pipelines:
+    - pipeline.id: main
+      path.config: /usr/share/logstash/config/pipelines
+  podTemplate:
+    spec:
+      volumes:
+        - name: pipeline-volume
+          secret:
+            secretName: quickstart-pipeline
+      containers:
+        - name: logstash
+          volumeMounts:
+            - mountPath: /usr/share/logstash/config/pipelines
+              name: pipeline-volume
+              readOnly: true
+----
+
+NOTE: Logstash persistent queues (PQs) and dead letter queues (DLQs) are not currently managed by the Logstash operator, and using them will require you to create and manage your own Volumes and VolumeMounts.
+
+
+[id="{p}-logstash-pipelines-es"]
+=== Using Elasticsearch in Logstash pipelines
+
+The `spec.elasticsearchRefs` section provides a mechanism to help configure Logstash to establish a secured connection to one or more managed Elasticsearch clusters. By default, each `elasticsearchRef` will target all nodes in its referenced Elasticsearch cluster.
If you want to direct traffic to specific nodes of your Elasticsearch cluster, refer to <<{p}-traffic-splitting>> for more information and examples.
+
+When you use `elasticsearchRefs` in a Logstash pipeline, the Logstash operator creates the necessary resources from the associated Elasticsearch cluster, and provides environment variables to allow these resources to be accessed from the pipeline configuration.
+Environment variables are replaced at runtime with the appropriate values.
+The environment variables have a fixed naming convention:
+
+* `NORMALIZED_CLUSTERNAME_ES_HOSTS`
+* `NORMALIZED_CLUSTERNAME_ES_USER`
+* `NORMALIZED_CLUSTERNAME_ES_PASSWORD`
+* `NORMALIZED_CLUSTERNAME_ES_SSL_CERTIFICATE_AUTHORITY`
+
+where NORMALIZED_CLUSTERNAME is the value taken from the `clusterName` field of the `elasticsearchRef` property, capitalized, with `-` transformed to `_`. For example, `prod-es` would become `PROD_ES`.
+
+NOTE: The `clusterName` value should be unique across all referenced Elasticsearch clusters in the same Logstash spec.
+
+[NOTE]
+--
+The Logstash ECK operator creates a user with the role `eck_logstash_user_role` when an `elasticsearchRef` is specified. This role grants the following permissions:
+```
+ "cluster": ["monitor", "manage_ilm", "read_ilm", "manage_logstash_pipelines", "manage_index_templates", "cluster:admin/ingest/pipeline/get"],
+ "indices": [
+    {
+      "names": [ "logstash", "logstash-*", "ecs-logstash", "ecs-logstash-*", "logs-*", "metrics-*", "synthetics-*", "traces-*" ],
+      "privileges": ["manage", "write", "create_index", "read", "view_index_metadata"]
+    }
+  ]
+```
+You can <<{p}-users-and-roles,update user permissions>> to include more indices if the Elasticsearch plugin is expected to use indices other than the default. See the <<{p}-logstash-configuration-custom-index, Logstash configuration with a custom index>> sample configuration, which updates this role so that Logstash can write to a custom index.
+--
+
+This example demonstrates how to create a Logstash deployment that connects to
+different Elasticsearch instances, one of which is in a separate namespace:
+
+[source,yaml,subs="attributes,+macros,callouts"]
+----
+apiVersion: logstash.k8s.elastic.co/v1alpha1
+kind: Logstash
+metadata:
+  name: quickstart
+spec:
+  version: {version}
+  count: 1
+  elasticsearchRefs: <1>
+    - clusterName: prod-es <2>
+      name: prod
+    - clusterName: qa-es <3>
+      name: qa
+      namespace: qa
+  pipelines:
+    - pipeline.id: main
+      config.string: |
+        input {
+          beats {
+            port => 5044
+          }
+        }
+        output {
+          elasticsearch { <4>
+            hosts => [ "${PROD_ES_ES_HOSTS}" ]
+            user => "${PROD_ES_ES_USER}"
+            password => "${PROD_ES_ES_PASSWORD}"
+            cacert => "${PROD_ES_ES_SSL_CERTIFICATE_AUTHORITY}"
+          }
+          elasticsearch { <4>
+            hosts => [ "${QA_ES_ES_HOSTS}" ]
+            user => "${QA_ES_ES_USER}"
+            password => "${QA_ES_ES_PASSWORD}"
+            cacert => "${QA_ES_ES_SSL_CERTIFICATE_AUTHORITY}"
+          }
+        }
+----
+
+<1> Define Elasticsearch references in the CRD. This will create the appropriate Secrets to store certificate details and the rest of the connection information, and create environment variables to allow them to be referred to in Logstash pipeline configurations.
+<2> This refers to an Elasticsearch cluster residing in the same namespace as the Logstash instances.
+<3> This refers to an Elasticsearch cluster residing in a different namespace from the Logstash instances.
+<4> Elasticsearch output definitions - use the environment variables created by the Logstash operator when specifying an `ElasticsearchRef`.
Note the use of "normalized" versions of the `clusterName` in the environment variables used to populate the relevant fields.
+
+
+
+[id="{p}-logstash-expose-services"]
+=== Expose services
+
+By default, the Logstash operator creates a headless Service for the metrics endpoint to enable metric collection by the Metricbeat sidecar for Stack Monitoring:
+
+
+[source,sh]
+----
+kubectl get service quickstart-ls-api
+----
+
+[source,sh,subs="attributes"]
+----
+NAME                TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)    AGE
+quickstart-ls-api   ClusterIP   None                       9600/TCP   48s
+----
+
+Additional services can be added in the `spec.services` section of the resource:
+
+[source,yaml,subs="attributes,+macros,callouts"]
+----
+services:
+  - name: beats
+    service:
+      spec:
+        ports:
+        - port: 5044
+          name: "winlogbeat"
+          protocol: TCP
+        - port: 5045
+          name: "filebeat"
+          protocol: TCP
+----
+
+[id="{p}-logstash-pod-configuration"]
+=== Pod configuration
+
+You can <<{p}-customize-pods,customize the Logstash Pod>> using a Pod template, defined in the `spec.podTemplate` section of the configuration.
+
+This example demonstrates how to create a Logstash deployment with increased heap size and resource limits.
+
+[source,yaml,subs="attributes"]
+----
+apiVersion: logstash.k8s.elastic.co/v1alpha1
+kind: Logstash
+metadata:
+  name: logstash-sample
+spec:
+  version: {version}
+  count: 1
+  elasticsearchRefs:
+    - name: "elasticsearch-sample"
+      clusterName: "sample"
+  podTemplate:
+    spec:
+      containers:
+      - name: logstash
+        env:
+        - name: LS_JAVA_OPTS
+          value: "-Xmx2g -Xms2g"
+        resources:
+          requests:
+            memory: 1Gi
+            cpu: 0.5
+          limits:
+            memory: 4Gi
+            cpu: 2
+----
+
+The name of the container in the Pod template must be `logstash`.
+
+
+[id="{p}-logstash-configuration-examples"]
+== Configuration examples
+
+experimental[]
+
+This section contains manifests that illustrate common use cases, and can be your starting point in exploring Logstash deployed with ECK. These manifests are self-contained and work out-of-the-box on any non-secured Kubernetes cluster. They all contain a three-node Elasticsearch cluster and a single Kibana instance.
+
+CAUTION: The examples in this section are for illustration purposes only and should not be considered production-ready. Some of these examples use the `node.store.allow_mmap: false` setting on Elasticsearch, which has performance implications and should be tuned for production workloads, as described in <<{p}-virtual-memory>>.
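+
+For production workloads, one alternative described in <<{p}-virtual-memory>> is to leave `node.store.allow_mmap` at its default and instead raise `vm.max_map_count` on the underlying host, for example with a privileged init container. The snippet below is a minimal, illustrative sketch only (the init container name and node set layout are arbitrary) and is not part of the recipes in this section:
+
+[source,yaml,subs="attributes"]
+----
+apiVersion: elasticsearch.k8s.elastic.co/v1
+kind: Elasticsearch
+metadata:
+  name: elasticsearch
+spec:
+  version: {version}
+  nodeSets:
+  - name: default
+    count: 3
+    podTemplate:
+      spec:
+        initContainers:
+        # Runs as root with privileges so it can raise the mmap limit before Elasticsearch starts.
+        - name: sysctl
+          securityContext:
+            privileged: true
+            runAsUser: 0
+          command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
+----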
+
+
+[id="{p}-logstash-configuration-single-pipeline-crd"]
+=== Single pipeline defined in CRD
+
+[source,sh,subs="attributes"]
+----
+kubectl apply -f {logstash_recipes}/logstash-eck.yaml
+----
+
+Deploys Logstash with a single pipeline defined in the CRD.
+
+[id="{p}-logstash-configuration-single-pipeline-secret"]
+=== Single pipeline defined in Secret
+
+[source,sh,subs="attributes"]
+----
+kubectl apply -f {logstash_recipes}/logstash-pipeline-as-secret.yaml
+----
+
+Deploys Logstash with a single pipeline defined in a Secret, referenced by a `pipelinesRef`.
+
+[id="{p}-logstash-configuration-pipeline-volume"]
+=== Pipeline configuration in mounted volume
+
+[source,sh,subs="attributes"]
+----
+kubectl apply -f {logstash_recipes}/logstash-pipeline-as-volume.yaml
+----
+
+Deploys Logstash with a single pipeline defined in a Secret, mounted as a volume, and referenced by
+`path.config`.
+
+[id="{p}-logstash-configuration-custom-index"]
+=== Writing to a custom Elasticsearch index
+
+[source,sh,subs="attributes"]
+----
+kubectl apply -f {logstash_recipes}/logstash-es-role.yaml
+----
+
+Deploys Logstash and Elasticsearch, and creates an updated version of the `eck_logstash_user_role` to write to a user-specified index.
+
+
+[id="{p}-logstash-configuration-stack-monitoring"]
+=== Elasticsearch and Kibana Stack Monitoring
+
+[source,sh,subs="attributes"]
+----
+kubectl apply -f {logstash_recipes}/logstash-monitored.yaml
+----
+
+Deploys an Elasticsearch and Kibana monitoring cluster, and a Logstash that will send its monitoring information to this cluster. You can view the Stack Monitoring data in the monitoring cluster's Kibana.
+
+[id="{p}-logstash-configuration-multiple-pipelines"]
+=== Multiple pipelines/multiple Elasticsearch clusters
+
+[source,sh,subs="attributes"]
+----
+kubectl apply -f {logstash_recipes}/logstash-multi.yaml
+----
+
+Deploys Elasticsearch in prod and qa configurations, running in separate namespaces. Logstash is configured with a pipeline-to-pipeline configuration, with a source pipeline routing events to separate `prod` and `qa` pipelines.
+
+[id="{p}-logstash-advanced-configuration"]
+== Advanced configuration
+
+experimental[]
+
+[id="{p}-logstash-jvm-options"]
+=== Setting JVM options
+
+
+You can change JVM settings by using the `LS_JAVA_OPTS` environment variable to override default settings in `jvm.options`. This approach ensures that the expected settings from `jvm.options` remain in place, and that only the options you explicitly set are overridden.
+
+To do this, set the `LS_JAVA_OPTS` environment variable in the container definition of your Logstash resource:
+
+[source,yaml,subs="attributes,+macros,callouts"]
+----
+apiVersion: logstash.k8s.elastic.co/v1alpha1
+kind: Logstash
+metadata:
+  name: quickstart
+spec:
+  podTemplate:
+    spec:
+      containers:
+      - name: logstash
+        env:
+        - name: LS_JAVA_OPTS <1>
+          value: "-Xmx2g -Xms2g"
+----
+<1> This will change the maximum and minimum heap size of the JVM on each Pod to 2GB.
+
+[id="{p}-logstash-scaling-logstash"]
+=== Scaling Logstash
+
+experimental[]
+
+The ability to scale Logstash is highly dependent on the pipeline configurations and the plugins used in those pipelines. Not all Logstash deployments can be scaled horizontally by increasing the number of Logstash Pods defined in the Logstash resource.
+Increasing the number of Pods can cause data loss or duplication, or leave Pods running idle because they cannot be utilized.
+
+These risks are especially likely with plugins that:
+
+* Retrieve data from external sources.
+** Plugins that retrieve data from external sources, and require some level of coordination between nodes to split up work, are not good candidates for scaling horizontally, and would likely produce some data duplication. These are plugins such as the JDBC input plugin, which has no automatic way to split queries across Logstash instances, or the S3 input, which has no way to split which buckets to read across Logstash instances. +** Plugins that retrieve data from external sources, where work is distributed externally to Logstash, but may impose their own limits. These are plugins like the Kafka input, or Azure event hubs, where the parallelism is limited by the number of partitions vs the number of consumers. In cases like this, extra Logstash Pods may be idle if the number of consumer threads multiplied by the number of Pods is greater than the number of partitions. +* Plugins that require events to be received in order. +** Certain plugins, such as the aggregate filter, expect events to be received in strict order to run without error or data loss. Any plugin that requires the number of pipeline workers to be `1` will also have issues when horizontal scaling is used. + If the pipeline does not contain any such plugin, the number of Logstash instances can be increased by setting the `count` property in the Logstash resource: + +[source,yaml,subs="attributes,+macros,callouts"] +---- +apiVersion: logstash.k8s.elastic.co/v1alpha1 +kind: Logstash +metadata: + name: quickstart +spec: + version: {version} + count: 3 +---- + + + +[id="{p}-logstash-technical-preview-limitations"] +== Technical Preview limitations + +experimental[] + +Note that this release is a technical preview. It is still under active development and has additional limitations: + +[id="{p}-logstash-technical-preview-persistence"] +=== No integrated support for persistence +The operator provides no integrated support for persistence, including PQ, DLQ support and plugins that may require persistent storage to keep track of state - any persistence should be added manually. + +[id="{p}-logstash-technical-preview-elasticsearchref"] +=== `ElasticsearchRef` implementation in plugins is in preview mode +Adding Elasticsearch to plugin definitions requires the use of environment variables populated by the Logstash operator, which may change in future versions of the Logstash operator. + +[id="{p}-logstash-technical-preview-limted-plugins"] +=== Limited support for plugins + +Not all {ls} plugins are supported for this technical preview. +Note that this is not an exhaustive list, and plugins outside of the https://www.elastic.co/support/matrix#logstash_plugins[Logstash plugin matrix] have not been considered for this list. + +**Supported plugins** + +These plugins have been tested and are supported: + +* logstash-input-beats +* logstash-input-elastic_agent +* logstash-input-kafka +* logstash-input-tcp +* logstash-input-http +* logstash-input-udp + +Most filter and output plugins are supported, with some exceptions noted in the next section. + +**Plugins not supported at technical preview** + +These plugins are not supported: + +* logstash-filter-jdbc_static +* logstash-filter-jdbc_streaming +* logstash-filter-aggregate + +**Plugins that may require additional manual work** + +Other {ls} filter and output plugins work, but require additional manual steps to mount volumes for certain configurations. +For example, logstash-output-s3 requires mounting a volume to store in-progress work to avoid data loss. 
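+
+As an illustration of the kind of manual setup involved (this is a sketch, not one of the tested recipes), the following manifest mounts a volume for the `logstash-output-s3` plugin and points the plugin's `temporary_directory` option at it. The PersistentVolumeClaim name, bucket and region are placeholders, and credentials handling is omitted:
+
+[source,yaml,subs="attributes"]
+----
+apiVersion: logstash.k8s.elastic.co/v1alpha1
+kind: Logstash
+metadata:
+  name: logstash-s3
+spec:
+  version: {version}
+  count: 1
+  pipelines:
+    - pipeline.id: main
+      config.string: |
+        input { beats { port => 5044 } }
+        output {
+          s3 {
+            region => "us-east-1"                                # placeholder
+            bucket => "my-bucket"                                # placeholder
+            temporary_directory => "/usr/share/logstash/s3-tmp"  # matches the volumeMount below
+          }
+        }
+  podTemplate:
+    spec:
+      volumes:
+        - name: s3-tmp
+          persistentVolumeClaim:
+            claimName: logstash-s3-tmp   # assumes a pre-created PVC
+      containers:
+        - name: logstash
+          volumeMounts:
+            - name: s3-tmp
+              mountPath: /usr/share/logstash/s3-tmp
+----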
+
+
diff --git a/docs/orchestrating-elastic-stack-applications/managing-compute-resources.asciidoc b/docs/orchestrating-elastic-stack-applications/managing-compute-resources.asciidoc
index 2752e69ba1..beb6435029 100644
--- a/docs/orchestrating-elastic-stack-applications/managing-compute-resources.asciidoc
+++ b/docs/orchestrating-elastic-stack-applications/managing-compute-resources.asciidoc
@@ -109,7 +109,7 @@ A link:https://github.com/kubernetes/kubernetes/issues/51135[known Kubernetes is
 [float]
 [id="{p}-compute-resources-kibana-and-apm"]
-=== Set compute resources for Kibana, Enterprise Search, Elastic Maps Server and APM Server
+=== Set compute resources for Kibana, Enterprise Search, Elastic Maps Server, APM Server and Logstash
 .Kibana
 [source,yaml,subs="attributes"]
 ----
@@ -205,6 +205,30 @@ spec:
       - name: JAVA_OPTS
         value: -Xms3500m -Xmx3500m
 ----
+.Logstash
+[source,yaml,subs="attributes"]
+----
+apiVersion: logstash.k8s.elastic.co/v1alpha1
+kind: Logstash
+metadata:
+  name: logstash-quickstart
+spec:
+  version: {version}
+  podTemplate:
+    spec:
+      containers:
+      - name: logstash
+        resources:
+          requests:
+            memory: 4Gi
+            cpu: 1
+          limits:
+            memory: 4Gi
+            cpu: 2
+        env:
+        - name: LS_JAVA_OPTS
+          value: -Xms2000m -Xmx2000m
+----
 For the container name, use `apm-server`, `maps`, `kibana` or `enterprise-search`, respectively.
@@ -282,6 +306,7 @@ If `resources` is not defined in the specification of an object, then the operat
 |Elastic Agent |350Mi |350Mi
 |Elastic Maps Server |200Mi |200Mi
 |Enterprise Search |4Gi |4Gi
+|Logstash |2Gi |2Gi
 |===
 If the Kubernetes cluster is configured with https://kubernetes.io/docs/tasks/administer-cluster/manage-resources/memory-default-namespace/[LimitRanges] that enforce a minimum memory constraint, they could interfere with the operator defaults and cause object creation to fail.
diff --git a/docs/orchestrating-elastic-stack-applications/orchestrating-elastic-stack-applications.asciidoc b/docs/orchestrating-elastic-stack-applications/orchestrating-elastic-stack-applications.asciidoc
index 4e6a59b4c7..103430e726 100644
--- a/docs/orchestrating-elastic-stack-applications/orchestrating-elastic-stack-applications.asciidoc
+++ b/docs/orchestrating-elastic-stack-applications/orchestrating-elastic-stack-applications.asciidoc
@@ -17,6 +17,7 @@ endif::[]
 - <<{p}-maps>>
 - <<{p}-enterprise-search>>
 - <<{p}-beat>>
+- <<{p}-logstash>>
 - <<{p}-stack-helm-chart>>
 - <<{p}-recipes>>
 - <<{p}-securing-stack>>
@@ -37,6 +38,7 @@ include::agent-fleet.asciidoc[leveloffset=+1]
 include::maps.asciidoc[leveloffset=+1]
 include::enterprise-search.asciidoc[leveloffset=+1]
 include::beat.asciidoc[leveloffset=+1]
+include::logstash.asciidoc[leveloffset=+1]
 include::stack-helm-chart.asciidoc[leveloffset=+1]
 include::recipes.asciidoc[leveloffset=+1]
 include::securing-stack.asciidoc[leveloffset=+1]
diff --git a/docs/quickstart.asciidoc b/docs/quickstart.asciidoc
index 1b27d7e98e..ded61d7999 100644
--- a/docs/quickstart.asciidoc
+++ b/docs/quickstart.asciidoc
@@ -59,6 +59,7 @@ customresourcedefinition.apiextensions.k8s.io/elasticmapsservers.maps.k8s.elasti
 customresourcedefinition.apiextensions.k8s.io/elasticsearches.elasticsearch.k8s.elastic.co created
 customresourcedefinition.apiextensions.k8s.io/enterprisesearches.enterprisesearch.k8s.elastic.co created
 customresourcedefinition.apiextensions.k8s.io/kibanas.kibana.k8s.elastic.co created
+customresourcedefinition.apiextensions.k8s.io/logstashes.logstash.k8s.elastic.co created
 ----
 .
Install the operator with its RBAC rules: diff --git a/docs/supported-versions.asciidoc b/docs/supported-versions.asciidoc index b7e4268646..7f4ad1884b 100644 --- a/docs/supported-versions.asciidoc +++ b/docs/supported-versions.asciidoc @@ -7,6 +7,7 @@ * Beats: 7.0+, 8+ * Elastic Agent: 7.10+ (standalone), 7.14+ (Fleet), 8+ * Elastic Maps Server: 7.11+, 8+ +* Logstash: 8.7+ ECK should work with all conformant installers as listed in these link:https://github.com/cncf/k8s-conformance/blob/master/faq.md#what-is-a-distribution-hosted-platform-and-an-installer[FAQs]. Distributions include source patches and so may not work as-is with ECK.