diff --git a/.gitbook.yaml b/.gitbook.yaml
index 8c4b92479..703fd8f6e 100644
--- a/.gitbook.yaml
+++ b/.gitbook.yaml
@@ -9,6 +9,7 @@ redirects:
input/disk: ./pipeline/inputs/disk-io-metrics.md
#inputs/docker: ./pipeline/inputs/
input/dummy: ./pipeline/inputs/dummy.md
+ input/elasticsearch: ./pipeline/inputs/elasticsearch.md
input/exec: ./pipeline/inputs/exec.md
input/fluentbit: ./pipeline/inputs/fluentbit-metrics.md
input/forward: ./pipeline/inputs/forward.md
@@ -20,6 +21,7 @@ redirects:
input/netif: ./pipeline/inputs/network-io-metrics.md
input/proc: ./pipeline/inputs/process.md
input/random: ./pipeline/inputs/random.md
+ input/splunk: ./pipeline/inputs/splunk.md
input/serial: ./pipeline/inputs/serial-interface.md
#inputs/statsd: ./pipeline/inputs/
input/stdin: ./pipeline/inputs/standard-input.md
@@ -28,6 +30,7 @@ redirects:
input/tail: ./pipeline/inputs/tail.md
input/tcp: ./pipeline/inputs/tcp.md
input/thermal: ./pipeline/inputs/thermal.md
+ input/udp: ./pipeline/inputs/udp.md
input/winlog: ./pipeline/inputs/windows-event-log.md
input/winevtlog: ./pipeline/inputs/windows-event-log-winevtlog.md
@@ -50,6 +53,7 @@ redirects:
output/azure_kusto: ./pipeline/outputs/azure_kusto.md
output/bigquery: ./pipeline/outputs/bigquery.md
output/counter: ./pipeline/outputs/counter.md
+ output/chronicle: ./pipeline/outputs/chronicle.md
output/cloudwatch: ./pipeline/outputs/cloudwatch.md
output/datadog: ./pipeline/outputs/datadog.md
output/es: ./pipeline/outputs/elasticsearch.md
diff --git a/.gitbook/assets/FluentBitDocumentation-01-01.png b/.gitbook/assets/FluentBitDocumentation-01-01.png
new file mode 100644
index 000000000..c440b9f1b
Binary files /dev/null and b/.gitbook/assets/FluentBitDocumentation-01-01.png differ
diff --git a/.gitbook/assets/FluentBitV3-02.png b/.gitbook/assets/FluentBitV3-02.png
new file mode 100644
index 000000000..a8c380b00
Binary files /dev/null and b/.gitbook/assets/FluentBitV3-02.png differ
diff --git a/.gitbook/assets/azure-logs-ingestion-overview.png b/.gitbook/assets/azure-logs-ingestion-overview.png
new file mode 100644
index 000000000..044a660ea
Binary files /dev/null and b/.gitbook/assets/azure-logs-ingestion-overview.png differ
diff --git a/.gitbook/assets/logo_documentation_2.1.png b/.gitbook/assets/logo_documentation_2.1.png
new file mode 100644
index 000000000..867a47dd4
Binary files /dev/null and b/.gitbook/assets/logo_documentation_2.1.png differ
diff --git a/.gitbook/assets/v2.2 big@2x.png b/.gitbook/assets/v2.2 big@2x.png
new file mode 100644
index 000000000..1ae8c088e
Binary files /dev/null and b/.gitbook/assets/v2.2 big@2x.png differ
diff --git a/.github/workflows/cron-stale.yaml b/.github/workflows/cron-stale.yaml
index 98219385c..07d4e9b67 100644
--- a/.github/workflows/cron-stale.yaml
+++ b/.github/workflows/cron-stale.yaml
@@ -11,7 +11,7 @@ jobs:
issues: write
pull-requests: write
steps:
- - uses: actions/stale@v7
+ - uses: actions/stale@v8
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 5 days. Maintainers can add the `exempt-stale` label.'
diff --git a/.github/workflows/pr-lint.yaml b/.github/workflows/pr-lint.yaml
index 432e2a91e..6460e74d8 100644
--- a/.github/workflows/pr-lint.yaml
+++ b/.github/workflows/pr-lint.yaml
@@ -8,16 +8,16 @@ jobs:
runs-on: ubuntu-latest
name: PR - Actionlint
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- run: |
echo "::add-matcher::.github/actionlint-matcher.json"
bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash)
./actionlint -color -shellcheck=
shell: bash
- docslint-pr:
+ shellcheck-pr:
runs-on: ubuntu-latest
- name: PR - Markdownlint
+ name: PR - Shellcheck
steps:
- - name: Run markdownlint
- uses: actionshub/markdownlint@v2.1.2
+ - uses: actions/checkout@v4
+ - uses: ludeeus/action-shellcheck@master
diff --git a/README.md b/README.md
index 0c718117a..3656b69aa 100644
--- a/README.md
+++ b/README.md
@@ -1,27 +1,27 @@
---
-description: High Performance Log and Metrics Processor
+description: High Performance Telemetry Agent for Logs, Metrics and Traces
---
-# Fluent Bit v2.0 Documentation
+# Fluent Bit v2.2 Documentation
-![](.gitbook/assets/logo\_documentation\_2.0.png)
+
-[Fluent Bit](http://fluentbit.io) is a Fast and Lightweight Logs and Metrics Processor and Forwarder for Linux, OSX, Windows and BSD family operating systems. It has been made with a strong focus on performance to allow the collection of events from different sources without complexity.![](https://static.scarf.sh/a.png?x-pxid=71f0e011-761f-4c6f-9a89-38817887faae)
+[Fluent Bit](http://fluentbit.io) is a Fast and Lightweight **Telemetry Agent** for Logs, Metrics, and Traces for Linux, macOS, Windows, and BSD family operating systems. It has been made with a strong focus on performance to allow the collection and processing of telemetry data from different sources without complexity.![](https://static.scarf.sh/a.png?x-pxid=71f0e011-761f-4c6f-9a89-38817887faae)
## Features
-* High Performance
+* High Performance: High throughput with low resource consumption
* Data Parsing
* Convert your unstructured messages using our parsers: [JSON](pipeline/parsers/json.md), [Regex](pipeline/parsers/regular-expression.md), [LTSV](pipeline/parsers/ltsv.md) and [Logfmt](pipeline/parsers/logfmt.md)
-* Metrics Collection (Prometheus compatible)
+* Metrics Support: Prometheus and OpenTelemetry compatible
* Reliability and Data Integrity
* [Backpressure](administration/backpressure.md) Handling
* [Data Buffering](administration/buffering-and-storage.md) in memory and file system
* Networking
* Security: built-in TLS/SSL support
* Asynchronous I/O
-* Pluggable Architecture and [Extensibility](development/library\_api.md): Inputs, Filters and Outputs
- * More than 80 built-in plugins available
+* Pluggable Architecture and [Extensibility](development/library_api.md): Inputs, Filters and Outputs
+ * More than 100 built-in plugins are available
* Extensibility
* Write any input, filter or output plugin in C language
* WASM: [WASM Filter Plugins](development/wasm-filter-plugins.md) or [WASM Input Plugins](development/wasm-input-plugins.md)
@@ -35,4 +35,6 @@ description: High Performance Log and Metrics Processor
## Fluent Bit, Fluentd and CNCF
-[Fluent Bit](http://fluentbit.io) is a [CNCF](https://cncf.io) sub-project under the umbrella of [Fluentd](http://fluentd.org), it's licensed under the terms of the [Apache License v2.0](http://www.apache.org/licenses/LICENSE-2.0). This project was originally created by [Treasure Data](https://www.treasuredata.com) and is currently a **vendor neutral** and community driven project.
+[Fluent Bit](http://fluentbit.io) is a [CNCF](https://cncf.io) **graduated** sub-project under the umbrella of [Fluentd](http://fluentd.org). Fluent Bit is licensed under the terms of the [Apache License v2.0](http://www.apache.org/licenses/LICENSE-2.0).
+
+Fluent Bit was originally created by [Eduardo Silva](https://www.linkedin.com/in/edsiper/). As a CNCF-hosted project, it is a fully **vendor-neutral** and community-driven project.
diff --git a/SUMMARY.md b/SUMMARY.md
index 64a927069..d8151098f 100644
--- a/SUMMARY.md
+++ b/SUMMARY.md
@@ -1,6 +1,6 @@
# Table of contents
-* [Fluent Bit v2.0 Documentation](README.md)
+* [Fluent Bit v2.2 Documentation](README.md)
## About
@@ -59,7 +59,7 @@
* [Configuration File](administration/configuring-fluent-bit/yaml/configuration-file.md)
* [Unit Sizes](administration/configuring-fluent-bit/unit-sizes.md)
* [Multiline Parsing](administration/configuring-fluent-bit/multiline-parsing.md)
-* [Security](administration/security.md)
+* [Transport Security](administration/transport-security.md)
* [Buffering & Storage](administration/buffering-and-storage.md)
* [Backpressure](administration/backpressure.md)
* [Scheduling and Retries](administration/scheduling-and-retries.md)
@@ -67,8 +67,9 @@
* [Memory Management](administration/memory-management.md)
* [Monitoring](administration/monitoring.md)
* [HTTP Proxy](administration/http-proxy.md)
+* [Hot Reload](administration/hot-reload.md)
* [Troubleshooting](administration/troubleshooting.md)
-
+
## Local Testing
* [Validating your Data and Structure](local-testing/validating-your-data-and-structure.md)
@@ -84,6 +85,7 @@
* [Docker Log Based Metrics](pipeline/inputs/docker-metrics.md)
* [Docker Events](pipeline/inputs/docker-events.md)
* [Dummy](pipeline/inputs/dummy.md)
+ * [Elasticsearch](pipeline/inputs/elasticsearch.md)
* [Exec](pipeline/inputs/exec.md)
* [Exec Wasi](pipeline/inputs/exec-wasi.md)
* [Fluent Bit Metrics](pipeline/inputs/fluentbit-metrics.md)
@@ -91,16 +93,21 @@
* [Head](pipeline/inputs/head.md)
* [HTTP](pipeline/inputs/http.md)
* [Health](pipeline/inputs/health.md)
+ * [Kafka](pipeline/inputs/kafka.md)
* [Kernel Logs](pipeline/inputs/kernel-logs.md)
+ * [Kubernetes Events](pipeline/inputs/kubernetes-events.md)
* [Memory Metrics](pipeline/inputs/memory-metrics.md)
* [MQTT](pipeline/inputs/mqtt.md)
* [Network I/O Log Based Metrics](pipeline/inputs/network-io-metrics.md)
* [NGINX Exporter Metrics](pipeline/inputs/nginx.md)
* [Node Exporter Metrics](pipeline/inputs/node-exporter-metrics.md)
+ * [Podman Metrics](pipeline/inputs/podman-metrics.md)
* [Process Log Based Metrics](pipeline/inputs/process.md)
+ * [Process Exporter Metrics](pipeline/inputs/process-exporter-metrics.md)
* [Prometheus Scrape Metrics](pipeline/inputs/prometheus-scrape-metrics.md)
* [Random](pipeline/inputs/random.md)
* [Serial Interface](pipeline/inputs/serial-interface.md)
+ * [Splunk](pipeline/inputs/splunk.md)
* [Standard Input](pipeline/inputs/standard-input.md)
* [StatsD](pipeline/inputs/statsd.md)
* [Syslog](pipeline/inputs/syslog.md)
@@ -108,6 +115,7 @@
* [Tail](pipeline/inputs/tail.md)
* [TCP](pipeline/inputs/tcp.md)
* [Thermal](pipeline/inputs/thermal.md)
+ * [UDP](pipeline/inputs/udp.md)
* [OpenTelemetry](pipeline/inputs/opentelemetry.md)
* [Windows Event Log](pipeline/inputs/windows-event-log.md)
* [Windows Event Log (winevtlog)](pipeline/inputs/windows-event-log-winevtlog.md)
@@ -127,6 +135,7 @@
* [GeoIP2 Filter](pipeline/filters/geoip2-filter.md)
* [Grep](pipeline/filters/grep.md)
* [Kubernetes](pipeline/filters/kubernetes.md)
+ * [Log to Metrics](pipeline/filters/log_to_metrics.md)
* [Lua](pipeline/filters/lua.md)
* [Parser](pipeline/filters/parser.md)
* [Record Modifier](pipeline/filters/record-modifier.md)
@@ -136,7 +145,9 @@
* [Nightfall](pipeline/filters/nightfall.md)
* [Rewrite Tag](pipeline/filters/rewrite-tag.md)
* [Standard Output](pipeline/filters/standard-output.md)
+ * [Sysinfo](pipeline/filters/sysinfo.md)
* [Throttle](pipeline/filters/throttle.md)
+ * [Type Converter](pipeline/filters/type-converter.md)
* [Tensorflow](pipeline/filters/tensorflow.md)
* [Wasm](pipeline/filters/wasm.md)
* [Outputs](pipeline/outputs/README.md)
@@ -147,6 +158,7 @@
* [Azure Blob](pipeline/outputs/azure\_blob.md)
* [Azure Data Explorer](pipeline/outputs/azure\_kusto.md)
* [Azure Log Analytics](pipeline/outputs/azure.md)
+ * [Azure Logs Ingestion API](pipeline/outputs/azure_logs_ingestion.md)
* [Counter](pipeline/outputs/counter.md)
* [Datadog](pipeline/outputs/datadog.md)
* [Elasticsearch](pipeline/outputs/elasticsearch.md)
@@ -154,6 +166,7 @@
* [FlowCounter](pipeline/outputs/flowcounter.md)
* [Forward](pipeline/outputs/forward.md)
* [GELF](pipeline/outputs/gelf.md)
+ * [Google Chronicle](pipeline/outputs/chronicle.md)
* [Google Cloud BigQuery](pipeline/outputs/bigquery.md)
* [HTTP](pipeline/outputs/http.md)
* [InfluxDB](pipeline/outputs/influxdb.md)
@@ -165,6 +178,7 @@
* [New Relic](pipeline/outputs/new-relic.md)
* [NULL](pipeline/outputs/null.md)
* [Observe](pipeline/outputs/observe.md)
+ * [Oracle Log Analytics](pipeline/outputs/oci-logging-analytics.md)
* [OpenSearch](pipeline/outputs/opensearch.md)
* [OpenTelemetry](pipeline/outputs/opentelemetry.md)
* [PostgreSQL](pipeline/outputs/postgresql.md)
@@ -178,6 +192,7 @@
* [Syslog](pipeline/outputs/syslog.md)
* [TCP & TLS](pipeline/outputs/tcp-and-tls.md)
* [Treasure Data](pipeline/outputs/treasure-data.md)
+ * [Vivo Exporter](pipeline/outputs/vivo-exporter.md)
* [WebSocket](pipeline/outputs/websocket.md)
## Stream Processing
diff --git a/about/fluentd-and-fluent-bit.md b/about/fluentd-and-fluent-bit.md
index 0cba6544e..f73a9c84d 100644
--- a/about/fluentd-and-fluent-bit.md
+++ b/about/fluentd-and-fluent-bit.md
@@ -1,33 +1,33 @@
---
-description: The Production Grade Ecosystem
+description: The Production Grade Telemetry Ecosystem
---
# Fluentd & Fluent Bit
-Logging and data processing in general can be complex, and at scale a bit more, that's why [Fluentd](https://www.fluentd.org) was born. Fluentd has become more than a simple tool, it has grown into a fullscale ecosystem that contains SDKs for different languages and sub-projects like [Fluent Bit](https://fluentbit.io).
+Telemetry data processing in general can be complex, and even more so at scale; that's why [Fluentd](https://www.fluentd.org) was born. Fluentd has become more than a simple tool, it has grown into a fullscale ecosystem that contains SDKs for different languages and sub-projects like [Fluent Bit](https://fluentbit.io).
On this page, we will describe the relationship between the [Fluentd](http://fluentd.org) and [Fluent Bit](http://fluentbit.io) open source projects, as a summary we can say both are:
* Licensed under the terms of Apache License v2.0
-* Hosted projects by the [Cloud Native Computing Foundation \(CNCF\)](https://cncf.io)
-* Production Grade solutions: deployed **thousands** of times every single day, **millions** per **month**.
-* Community driven projects
-* Widely Adopted by the Industry: trusted by all major companies like AWS, Microsoft, Google Cloud and hundred of others.
-* Originally created by [Treasure Data](https://www.treasuredata.com).
+* **Graduated** projects hosted by the [Cloud Native Computing Foundation (CNCF)](https://cncf.io)
+* Production Grade solutions: deployed **millions** of times every single day.
+* **Vendor neutral** and community driven projects
+* Widely Adopted by the Industry: trusted by all major companies like AWS, Microsoft, Google Cloud and hundreds of others.
Both projects share a lot of similarities, [Fluent Bit](https://fluentbit.io) is fully designed and built on top of the best ideas of [Fluentd](https://www.fluentd.org) architecture and general design. Choosing which one to use depends on the end-user needs.
-The following table describes a comparison in different areas of the projects:
-
-| | Fluentd | Fluent Bit |
-| :--- | :--- | :--- |
-| Scope | Containers / Servers | Embedded Linux / Containers / Servers |
-| Language | C & Ruby | C |
-| Memory | ~40MB | ~650KB |
-| Performance | High Performance | High Performance |
-| Dependencies | Built as a Ruby Gem, it requires a certain number of gems. | Zero dependencies, unless some special plugin requires them. |
-| Plugins | More than 1000 plugins available | Around 70 plugins available |
-| License | [Apache License v2.0](http://www.apache.org/licenses/LICENSE-2.0) | [Apache License v2.0](http://www.apache.org/licenses/LICENSE-2.0) |
-
-Both [Fluentd](https://www.fluentd.org) and [Fluent Bit](https://fluentbit.io) can work as Aggregators or Forwarders, they both can complement each other or use them as standalone solutions.
-
+The following table describes a comparison of different areas of the projects:
+
+| | Fluentd | Fluent Bit |
+| ------------ | ----------------------------------------------------------------- | ----------------------------------------------------------------- |
+| Scope | Containers / Servers | Embedded Linux / Containers / Servers |
+| Language | C & Ruby | C |
+| Memory | > 60MB | \~1MB |
+| Performance | Medium Performance | High Performance |
+| Dependencies | Built as a Ruby Gem, it requires a certain number of gems. | Zero dependencies, unless some special plugin requires them. |
+| Plugins | More than 1000 external plugins are available | More than 100 built-in plugins are available |
+| License | [Apache License v2.0](http://www.apache.org/licenses/LICENSE-2.0) | [Apache License v2.0](http://www.apache.org/licenses/LICENSE-2.0) |
+
+Both [Fluentd](https://www.fluentd.org) and [Fluent Bit](https://fluentbit.io) can work as Aggregators or Forwarders; they can complement each other or be used as standalone solutions.\
+\
+In recent years, Cloud Providers have switched from Fluentd to Fluent Bit for performance and compatibility reasons. Fluent Bit is now considered the **next-generation** solution.
diff --git a/about/history.md b/about/history.md
index 0b8630a26..780aa3ccc 100644
--- a/about/history.md
+++ b/about/history.md
@@ -4,7 +4,7 @@ description: Every project has a story
# A Brief History of Fluent Bit
-On 2014, the [Fluentd](https://fluentd.org) team at [Treasure Data](https://www.treasuredata.com) forecasted the need of a lightweight log processor for constraint environments like Embedded Linux and Gateways, the project aimed to be part of the Fluentd Ecosystem and we called it [Fluent Bit](https://fluentbit.io), fully open source and available under the terms of the [Apache License v2.0](http://www.apache.org/licenses/LICENSE-2.0).
+In 2014, the [Fluentd](https://www.fluentd.org/) team at [Treasure Data](https://www.treasuredata.com/) was forecasting the need for a lightweight log processor for constrained environments like Embedded Linux and Gateways. The project aimed to be part of the Fluentd Ecosystem; at that moment, Eduardo created [Fluent Bit](https://fluentbit.io/), a new open source solution written from scratch and available under the terms of the [Apache License v2.0](http://www.apache.org/licenses/LICENSE-2.0).\
-After the project was around for some time, it got some traction in the Embedded market but we also started getting requests for several features from the Cloud community like more inputs, filters, and outputs. Not so long after that, Fluent Bit becomes one of the preferred solutions to solve the logging challenges in Cloud environments.
+After the project had been around for some time, it gained more traction on regular Linux systems; with the new containerized world, the Cloud Native community also asked to extend the project scope to support more sources, filters, and destinations. Not long after, Fluent Bit became one of the preferred solutions to solve the logging challenges in Cloud environments.
diff --git a/about/license.md b/about/license.md
index 5e67beda4..44bb3506e 100644
--- a/about/license.md
+++ b/about/license.md
@@ -4,7 +4,7 @@ description: Strong Commitment to the Openness and Collaboration
# License
-[Fluent Bit](http://fluentbit.io), including it core, plugins and tools are distributed under the terms of the [Apache License v2.0](http://www.apache.org/licenses/LICENSE-2.0):
+[Fluent Bit](http://fluentbit.io), including its core, plugins and tools are distributed under the terms of the [Apache License v2.0](http://www.apache.org/licenses/LICENSE-2.0):
```text
Apache License
diff --git a/about/what-is-fluent-bit.md b/about/what-is-fluent-bit.md
index 39df11e05..afb7d5aeb 100644
--- a/about/what-is-fluent-bit.md
+++ b/about/what-is-fluent-bit.md
@@ -2,18 +2,16 @@
description: Fluent Bit is a CNCF sub-project under the umbrella of Fluentd
---
-# What is Fluent Bit ?
+# What is Fluent Bit?
-[Fluent Bit](http://fluentbit.io/) is an open source and multi-platform log processor tool which aims to be a generic Swiss knife for logs processing and distribution.
-Nowadays the number of sources of information in our environments is ever increasing. Handling data collection at scale is complex, and collecting and aggregating diverse data requires a specialized tool that can deal with:
-* Different sources of information
-* Different data formats
-* Data Reliability
-* Security
-* Flexible Routing
-* Multiple destinations
+[Fluent Bit](https://fluentbit.io) is an open-source telemetry agent specifically designed to efficiently handle the challenges of collecting and processing telemetry data across a wide range of environments, from constrained systems to complex cloud infrastructures. Managing telemetry data from various sources and formats can be a constant challenge, particularly when performance is a critical factor.
-[Fluent Bit](https://fluentbit.io) has been designed with performance and low resources consumption in mind.
+Rather than serving as a drop-in replacement, Fluent Bit enhances the observability strategy for your infrastructure by adapting and optimizing your existing logging layer, as well as metrics and traces processing. Furthermore, Fluent Bit supports a vendor-neutral approach, seamlessly integrating with other ecosystems such as Prometheus and OpenTelemetry. Trusted by major cloud providers, banks, and companies in need of a ready-to-use telemetry agent solution, Fluent Bit effectively manages diverse data sources and formats while maintaining optimal performance.
+Fluent Bit can be deployed as an edge agent for localized telemetry data handling or utilized as a central aggregator/collector for managing telemetry data across multiple sources and environments.
+
+[Fluent Bit](https://fluentbit.io) has been designed with performance and low resource consumption in mind.
+
+{% embed url="https://www.youtube.com/watch?v=3ELc1helke4" %}
diff --git a/administration/backpressure.md b/administration/backpressure.md
index f0881972e..14759f256 100644
--- a/administration/backpressure.md
+++ b/administration/backpressure.md
@@ -2,17 +2,17 @@
Under certain scenarios it is possible for logs or data to be ingested or created faster than the ability to flush it to some destinations. One such common scenario is when reading from big log files, especially with a large backlog, and dispatching the logs to a backend over the network, which takes time to respond. This generates backpressure leading to high memory consumption in the service.
-In order to avoid backpressure, Fluent Bit implements a mechanism in the engine that restricts the amount of data that an input plugin can ingest, this is done through the configuration parameter **Mem\_Buf\_Limit**.
+In order to avoid backpressure, Fluent Bit implements a mechanism in the engine that restricts the amount of data that an input plugin can ingest; this is done through the configuration parameters **Mem\_Buf\_Limit** and **storage.max\_chunks\_up**.
-As described in the [Buffering](../concepts/buffering.md) concepts section, Fluent Bit offers a hybrid mode for data handling: in-memory and filesystem \(optional\).
+As described in the [Buffering](../concepts/buffering.md) concepts section, Fluent Bit offers two modes for data handling: in-memory only (default) and in-memory + filesystem \(optional\).
-In `memory` is always available and can be restricted with **Mem\_Buf\_Limit**. If memory reaches this limit and you reach a backpressure scenario, you will not be able to ingest more data until the data chunks that are in memory can be flushed.
+The default `storage.type memory` buffer can be restricted with **Mem\_Buf\_Limit**. If memory reaches this limit and you reach a backpressure scenario, you will not be able to ingest more data until the data chunks that are in memory can be flushed. The input will be paused and Fluent Bit will [emit](https://github.com/fluent/fluent-bit/blob/v2.0.0/src/flb_input_chunk.c#L1334) a `[warn] [input] {input name or alias} paused (mem buf overlimit)` log message. Depending on the input plugin in use, this might lead to discarding incoming data \(e.g., the TCP input plugin\). The tail plugin can handle pause without data loss; it will store its current file offset and resume reading later. When buffer memory is available, the input will resume collecting/accepting logs and Fluent Bit will [emit](https://github.com/fluent/fluent-bit/blob/v2.0.0/src/flb_input_chunk.c#L1277) a `[info] [input] {input name or alias} resume (mem buf overlimit)` message.
-Depending on the input plugin in use, this might lead to discard incoming data \(e.g: TCP input plugin\). This can be mitigated by configuring secondary storage on the filesystem using the `storage.type` of `filesystem` \(as described in [Buffering & Storage](buffering-and-storage.md)\). When the limit is reached, all the new data will be stored safely in the filesystem.
+This risk of data loss can be mitigated by configuring secondary storage on the filesystem using the `storage.type` of `filesystem` \(as described in [Buffering & Storage](buffering-and-storage.md)\). Initially, logs will be buffered to *both* memory and filesystem. When the `storage.max_chunks_up` limit is reached, all the new data will be stored safely only in the filesystem. Fluent Bit will stop enqueueing new data in memory and will only buffer to the filesystem. Please note that when `storage.type filesystem` is set, the `Mem_Buf_Limit` setting no longer has any effect; instead, the `[SERVICE]` level `storage.max_chunks_up` setting controls the size of the memory buffer.
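+
+A minimal sketch of this mitigation in classic configuration mode \(the input plugin, path, and port here are illustrative\):
+
+```
+[SERVICE]
+    # storage.path is required for filesystem buffering
+    storage.path /var/fluent-bit/buffer
+
+[INPUT]
+    Name         tcp
+    Listen       0.0.0.0
+    Port         5170
+    # buffer chunks in both memory and filesystem; once the memory limit
+    # is reached, new data is stored safely only on the filesystem
+    storage.type filesystem
+```
+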
## Mem\_Buf\_Limit
-This option is disabled by default and can be applied to all input plugins. Let's explain its behavior using the following scenario:
+This option is disabled by default and can be applied to all input plugins. Please note that `Mem_Buf_Limit` only applies with the default `storage.type memory`. Let's explain its behavior using the following scenario:
* Mem\_Buf\_Limit is set to 1MB \(one megabyte\)
* input plugin tries to append 700KB
@@ -36,8 +36,32 @@ After some time, usually measured in seconds, if the scheduler was able to flush
* If the plugin is paused, it invokes a **resume** callback
* input plugin can continue appending more data
+## storage.max\_chunks\_up
+
+Please note that when `storage.type filesystem` is set, the `Mem_Buf_Limit` setting no longer has any effect; instead, the `[SERVICE]` level `storage.max_chunks_up` setting controls the size of the memory buffer.
+
+The setting behaves similarly to the above scenario with `Mem_Buf_Limit` when the non-default `storage.pause_on_chunks_overlimit` is enabled.
+
+When (default) `storage.pause_on_chunks_overlimit` is disabled, the input will not pause when the memory limit is reached. Instead, it will switch to only buffering logs in the filesystem. The disk space used for filesystem buffering can be limited with `storage.total_limit_size`.
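+
+A sketch of how these settings fit together, assuming `storage.pause_on_chunks_overlimit` is set on the input section and `storage.total_limit_size` on the output section \(plugin names, path, and sizes are placeholders\):
+
+```
+[SERVICE]
+    storage.path          /var/fluent-bit/buffer
+    storage.max_chunks_up 128
+
+[INPUT]
+    Name         tail
+    Path         /var/log/app.log
+    storage.type filesystem
+    # non-default: pause this input when storage.max_chunks_up is reached,
+    # instead of switching to filesystem-only buffering
+    storage.pause_on_chunks_overlimit on
+
+[OUTPUT]
+    Name  stdout
+    Match *
+    # cap the disk space used for this output's filesystem buffer
+    storage.total_limit_size 500M
+```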
+
+Please consult the [Buffering & Storage](buffering-and-storage.md) docs for more information.
+
## About pause and resume Callbacks
Each plugin is independent and not all of them implements the **pause** and **resume** callbacks. As said, these callbacks are just a notification mechanism for the plugin.
-One example of a plugin that implements these callbacks and keeps state correctly is the [Tail Input](../pipeline/inputs/tail.md) plugin. When the **pause** callback is triggered, it pauses its collectors and stops appending data. Upon **resume**, it resumes the collectors and continues ingesting data.
+One example of a plugin that implements these callbacks and keeps state correctly is the [Tail Input](../pipeline/inputs/tail.md) plugin. When the **pause** callback is triggered, it pauses its collectors and stops appending data. Upon **resume**, it resumes the collectors and continues ingesting data. Tail will track the current file offset when it pauses and resume at the same position. If the file has not been deleted or moved, it can still be read.
+
+With the default `storage.type memory` and `Mem_Buf_Limit`, the following log messages will be emitted for pause and resume:
+
+```
+[warn] [input] {input name or alias} paused (mem buf overlimit)
+[info] [input] {input name or alias} resume (mem buf overlimit)
+```
+
+With `storage.type filesystem` and `storage.max_chunks_up`, the following log messages will be emitted for pause and resume:
+
+```
+[input] {input name or alias} paused (storage buf overlimit)
+[input] {input name or alias} resume (storage buf overlimit)
+```
diff --git a/administration/buffering-and-storage.md b/administration/buffering-and-storage.md
index 12750a2db..d5153e917 100644
--- a/administration/buffering-and-storage.md
+++ b/administration/buffering-and-storage.md
@@ -14,6 +14,16 @@ Understanding the chunks, buffering and backpressure concepts is critical for a
When an input plugin \(source\) emits records, the engine groups the records together in a _Chunk_. A Chunk size usually is around 2MB. By configuration, the engine decides where to place this Chunk, the default is that all chunks are created only in memory.
+#### Irrecoverable Chunks
+
+There are two scenarios where Fluent Bit marks chunks as irrecoverable:
+
+* When Fluent Bit encounters a bad layout in a chunk. A bad layout is a chunk that does not conform to the expected format \(see the [chunk definition](https://github.com/fluent/fluent-bit/blob/master/CHUNKS.md)\).
+
+* When Fluent Bit encounters an incorrect or invalid chunk header size.
+
+In both scenarios, Fluent Bit will log an error message and then discard the irrecoverable chunks.
+
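+If these chunks should also be removed from disk, the `storage.delete_irrecoverable_chunks` option described later on this page can be enabled in the `[SERVICE]` section; a minimal sketch \(the storage path is a placeholder\):
+
+```text
+[SERVICE]
+    storage.path                        /var/fluent-bit/buffer
+    # delete irrecoverable chunks at runtime and at startup
+    storage.delete_irrecoverable_chunks On
+```
+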
#### Buffering and Memory
As mentioned above, the Chunks generated by the engine are placed in memory but this is configurable.
@@ -105,6 +115,7 @@ The Service section refers to the section defined in the main [configuration fil
| storage.max\_chunks\_up | If the input plugin has enabled `filesystem` storage type, this property sets the maximum number of Chunks that can be `up` in memory. *This is the setting to use to control memory usage when you enable `storage.type filesystem`*. | 128 |
| storage.backlog.mem\_limit | If _storage.path_ is set, Fluent Bit will look for data chunks that were not delivered and are still in the storage layer, these are called _backlog_ data. _Backlog chunks_ are filesystem chunks that were left over from a previous Fluent Bit run; chunks that could not be sent before exit that Fluent Bit will pick up when restarted. Fluent Bit will check the `storage.backlog.mem_limit` value against the current memory usage from all `up` chunks for the input. If the `up` chunks currently consume less memory than the limit, it will bring the _backlog_ chunks up into memory so they can be sent by outputs. | 5M |
| storage.metrics | If `http_server` option has been enabled in the main `[SERVICE]` section, this option registers a new endpoint where internal metrics of the storage layer can be consumed. For more details refer to the [Monitoring](monitoring.md) section. | off |
+| storage.delete_irrecoverable_chunks | When enabled, [irrecoverable chunks](./buffering-and-storage.md#irrecoverable-chunks) will be deleted during runtime, and any other irrecoverable chunk located in the configured storage path directory will be deleted when Fluent Bit starts. | Off |
a Service section will look like this:
diff --git a/administration/configuring-fluent-bit/README.md b/administration/configuring-fluent-bit/README.md
index 2e7b7d714..e1a73a507 100644
--- a/administration/configuring-fluent-bit/README.md
+++ b/administration/configuring-fluent-bit/README.md
@@ -3,7 +3,7 @@
Currently, Fluent Bit supports two configuration formats:
* [Classic mode](classic-mode/README.md).
-* [Yaml](yaml/README.md). (YAML configuration is tech preview so not recommended for production.)
+* [Yaml](yaml/README.md). (YAML configuration has been production ready since Fluent Bit 2.0.)
## CLI flags
diff --git a/administration/configuring-fluent-bit/classic-mode/configuration-file.md b/administration/configuring-fluent-bit/classic-mode/configuration-file.md
index e8a11aadf..60edb21e2 100644
--- a/administration/configuring-fluent-bit/classic-mode/configuration-file.md
+++ b/administration/configuring-fluent-bit/classic-mode/configuration-file.md
@@ -39,6 +39,7 @@ The _Service_ section defines global properties of the service, the keys availab
| scheduler.cap | Set a maximum retry time in second. The property is supported from v1.8.7. | 2000 |
| scheduler.base | Set a base of exponential backoff. The property is supported from v1.8.7. | 5 |
| json.convert_nan_to_null | If enabled, NaN is converted to null when fluent-bit converts msgpack to json. | false |
+| sp.convert_from_str_to_num | If enabled, the Stream Processor converts number strings to number types. | true |
The following is an example of a _SERVICE_ section:
@@ -138,7 +139,7 @@ The following configuration file example demonstrates how to collect CPU metrics
## Visualize
-You can also visualize Fluent Bit INPUT, FILTER, and OUTPUT configuration via [https://cloud.calyptia.com](https://cloud.calyptia.com/visualizer)
+You can also visualize Fluent Bit INPUT, FILTER, and OUTPUT configuration via [Calyptia](https://calyptia.com/free-trial)
![](../../../.gitbook/assets/image.png)
diff --git a/administration/configuring-fluent-bit/classic-mode/format-schema.md b/administration/configuring-fluent-bit/classic-mode/format-schema.md
index 5e38ba962..30efc1a6f 100644
--- a/administration/configuring-fluent-bit/classic-mode/format-schema.md
+++ b/administration/configuring-fluent-bit/classic-mode/format-schema.md
@@ -27,6 +27,7 @@ A section is defined by a name or title inside brackets. Looking at the example
* Multiple sections can exist on the same file.
* A section is expected to have comments and entries, it cannot be empty.
* Any commented line under a section, must be indented too.
+* End-of-line comments are not supported, only full-line comments (see the example below).
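+
+For example, a minimal sketch of valid comment placement (the keys shown are illustrative):
+
+```text
+[SERVICE]
+    # a full-line comment like this is valid and is indented with the section
+    Flush 5
+```
+
+An end-of-line comment such as `Flush 5  # comment` would not be valid.
+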
## Entries: Key/Value
diff --git a/administration/configuring-fluent-bit/classic-mode/record-accessor.md b/administration/configuring-fluent-bit/classic-mode/record-accessor.md
index 81fffd20c..9b950efa8 100644
--- a/administration/configuring-fluent-bit/classic-mode/record-accessor.md
+++ b/administration/configuring-fluent-bit/classic-mode/record-accessor.md
@@ -95,3 +95,23 @@ Fluent Bit v1.x.x
{"date":1599862267.483692,"log":"message 4","labels":{"color":"blue"}}
```
+### Limitations of record_accessor templating
+
+The Fluent Bit record_accessor library has a limitation in the characters that can separate template variables: only dots and commas (`.` and `,`) can come after a template variable. This is because the templating library must parse the template and determine the end of a variable.
+
+The following would be invalid templates because the two template variables are not separated by commas or dots:
+
+- `$TaskID-$ECSContainerName`
+- `$TaskID/$ECSContainerName`
+- `$TaskID_$ECSContainerName`
+- `$TaskIDfooo$ECSContainerName`
+
+However, the following are valid:
+- `$TaskID.$ECSContainerName`
+- `$TaskID.ecs_resource.$ECSContainerName`
+- `$TaskID.fooo.$ECSContainerName`
+
+And the following are valid since they only contain one template variable with nothing after it:
+- `fooo$TaskID`
+- `fooo____$TaskID`
+- `fooo/bar$TaskID`
diff --git a/administration/configuring-fluent-bit/classic-mode/upstream-servers.md b/administration/configuring-fluent-bit/classic-mode/upstream-servers.md
index 1696bba8e..11c95890a 100644
--- a/administration/configuring-fluent-bit/classic-mode/upstream-servers.md
+++ b/administration/configuring-fluent-bit/classic-mode/upstream-servers.md
@@ -27,7 +27,7 @@ A _Node_ might contain additional configuration keys required by the plugin, on
In addition to the properties defined in the table above, the network operations against a defined node can optionally be done through the use of TLS for further encryption and certificates use.
-The TLS options available are described in the [TLS/SSL](../../security.md) section and can be added to the any _Node_ section.
+The TLS options available are described in the [TLS/SSL](../../transport-security.md) section and can be added to any _Node_ section.
### Configuration File Example
diff --git a/administration/configuring-fluent-bit/classic-mode/variables.md b/administration/configuring-fluent-bit/classic-mode/variables.md
index 478682ab2..11e32a56b 100644
--- a/administration/configuring-fluent-bit/classic-mode/variables.md
+++ b/administration/configuring-fluent-bit/classic-mode/variables.md
@@ -10,6 +10,12 @@ ${MY_VARIABLE}
When Fluent Bit starts, the configuration reader will detect any request for `${MY_VARIABLE}` and will try to resolve its value.
+When Fluent Bit is running under systemd (using the official packages), environment variables can be set in the following files:
+* `/etc/default/fluent-bit` (Debian-based systems)
+* `/etc/sysconfig/fluent-bit` (others)
+
+These files are ignored if they do not exist.
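+
+For example, a hypothetical `/etc/default/fluent-bit` could define the variable used above:
+
+```text
+# /etc/default/fluent-bit
+MY_VARIABLE=my_value
+```
+
+After restarting the service, any `${MY_VARIABLE}` reference in the configuration file will resolve to `my_value`.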
+
## Example
Create the following configuration file \(`fluent-bit.conf`\):
diff --git a/administration/configuring-fluent-bit/yaml/README.md b/administration/configuring-fluent-bit/yaml/README.md
index 79894e320..ce6e7f8c4 100644
--- a/administration/configuring-fluent-bit/yaml/README.md
+++ b/administration/configuring-fluent-bit/yaml/README.md
@@ -1,7 +1,3 @@
# Fluent Bit YAML configuration
-Since version 1.9, Fluent Bit supports the use of YAML for configuration.
-
-{% hint style="info" %}
-YAML configuration is tech preview so not recommended for production.
-{% endhint %}
+The YAML configuration feature was introduced in Fluent Bit version 1.9 as experimental, and it has been production ready since Fluent Bit 2.0.
diff --git a/administration/configuring-fluent-bit/yaml/configuration-file.md b/administration/configuring-fluent-bit/yaml/configuration-file.md
index 6a5e882cc..4e25144d3 100644
--- a/administration/configuring-fluent-bit/yaml/configuration-file.md
+++ b/administration/configuring-fluent-bit/yaml/configuration-file.md
@@ -6,22 +6,26 @@ description: This page describes the yaml configuration file used by Fluent Bit
One of the ways to configure Fluent Bit is using a YAML configuration file that works at a global scope.
-The yaml configuration file supports the following sections:
+The YAML configuration file supports the following sections:
* Env
+* Includes
* Service
* Pipeline
* Inputs
* Filters
* Outputs
+The YAML configuration file does not support the following sections yet:
+* Parsers
+
{% hint style="info" %}
-YAML configuration is used in the smoke tests for containers so an always-correct up-to-date example is here: .
+YAML configuration is used in the smoke tests for containers, so an always-correct up-to-date example is here: .
{% endhint %}
## Env
-The _env_ section allows to configure variables that will be used later on this configuration file.
+The _env_ section allows the definition of configuration variables that will be used later in the configuration file.
Example:
@@ -37,28 +41,45 @@ service:
http_server: on
```
+
+## Includes
+
+The _includes_ section allows additional YAML configuration files to be merged into the current configuration, identified as a list of filenames. If no path is provided, then each file is assumed to be in a folder relative to the file referencing it.
+
+Example:
+
+```yaml
+# define file(s) to include in the current configuration; the second entry illustrates a relative subdirectory reference
+includes:
+ - inclusion-1.yaml
+ - subdir/inclusion-2.yaml
+```
+
## Service
-The _service_ section defines global properties of the service, the keys available as of this version are described in the following table:
+The _service_ section defines the global properties of the service. The Service keys available as of this version are described in the following table:
| Key | Description | Default Value |
| --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- |
| flush | Set the flush time in `seconds.nanoseconds`. The engine loop uses a Flush timeout to define when is required to flush the records ingested by input plugins through the defined output plugins. | 5 |
-| grace | Set the grace time in `seconds` as Integer value. The engine loop uses a Grace timeout to define wait time on exit | 5 |
-| daemon | Boolean value to set if Fluent Bit should run as a Daemon (background) or not. Allowed values are: yes, no, on and off. note: If you are using a Systemd based unit as the one we provide in our packages, do not turn on this option. | Off |
-| dns.mode | Set the primary transport layer protocol used by the asynchronous DNS resolver which can be overridden on a per plugin basis | UDP |
-| log_file | Absolute path for an optional log file. By default all logs are redirected to the standard error interface (stderr). | |
-| log_level | Set the logging verbosity level. Allowed values are: off, error, warn, info, debug and trace. Values are accumulative, e.g: if 'debug' is set, it will include error, warning, info and debug. Note that _trace_ mode is only available if Fluent Bit was built with the _WITH\_TRACE_ option enabled. | info |
-| parsers_file | Path for a `parsers` configuration file. Multiple Parsers_File entries can be defined within the section. | |
-| plugins_file | Path for a `plugins` configuration file. A _plugins_ configuration file allows to define paths for external plugins, for an example [see here](https://github.com/fluent/fluent-bit/blob/master/conf/plugins.conf). | |
+| grace | Set the grace time in `seconds` as an Integer value. The engine loop uses a Grace timeout to define the wait time on exit | 5 |
+| daemon | Boolean value to set if Fluent Bit should run as a Daemon (background) or not. Allowed values are: yes, no, on, and off. note: If you are using a Systemd based unit like the one we provide in our packages, do not turn on this option. | Off |
+| dns.mode | Sets the primary transport layer protocol used by the asynchronous DNS resolver, which can be overridden on a per plugin basis | UDP |
+| log_file | Absolute path for an optional log file. By default, all logs are redirected to the standard error interface (stderr). | |
+| log_level | Set the logging verbosity level. Allowed values are: off, error, warn, info, debug and trace. Values are accumulative, e.g., if 'debug' is set, it will include error, warning, info, and debug. Note that _trace_ mode is only available if Fluent Bit was built with the _WITH\_TRACE_ option enabled. | info |
+| parsers_file | Path for a `parsers` configuration file. Only a single entry is currently supported. | |
+| plugins_file | Path for a `plugins` configuration file. A _plugins_ configuration file allows the definition of paths for external plugins; for an example, [see here](https://github.com/fluent/fluent-bit/blob/master/conf/plugins.conf). | |
| streams_file | Path for the Stream Processor configuration file. To learn more about Stream Processing configuration go [here](../../../stream-processing/introduction.md). | |
| http_server | Enable built-in HTTP Server | Off |
| http_listen | Set listening interface for HTTP Server when it's enabled | 0.0.0.0 |
| http_port | Set TCP Port for the HTTP Server | 2020 |
-| coro_stack_size | Set the coroutines stack size in bytes. The value must be greater than the page size of the running system. Don't set too small value (say 4096), or coroutine threads can overrun the stack buffer. Do not change the default value of this parameter unless you know what you are doing. | 24576 |
-| scheduler.cap | Set a maximum retry time in second. The property is supported from v1.8.7. | 2000 |
-| scheduler.base | Set a base of exponential backoff. The property is supported from v1.8.7. | 5 |
+| coro_stack_size | Set the coroutines stack size in bytes. The value must be greater than the page size of the running system. Don't set too small a value (say 4096), or coroutine threads can overrun the stack buffer. Do not change the default value of this parameter unless you know what you are doing. | 24576 |
+| scheduler.cap | Set a maximum retry time in seconds. The property is supported from v1.8.7. | 2000 |
+| scheduler.base | Sets the base of exponential backoff. The property is supported from v1.8.7. | 5 |
| json.convert_nan_to_null | If enabled, NaN is converted to null when fluent-bit converts msgpack to json. | false |
+| sp.convert_from_str_to_num | If enabled, the Stream Processor converts number strings to number types. | true |
The following is an example of a _service_ section:
@@ -85,6 +106,41 @@ pipeline:
...
```
+Each of the subsections for _inputs_, _filters_ and _outputs_ constitutes an array of maps that holds the parameters for each plugin. Most properties are simple strings or numbers, so they can be defined directly, for example:
+
+```yaml
+pipeline:
+ inputs:
+ - name: tail
+ tag: syslog
+ path: /var/log/syslog
+ - name: http
+ tag: http_server
+ port: 8080
+```
+
+This pipeline consists of two _inputs_: a tail plugin and an HTTP server plugin. Each plugin has its own map in the array of _inputs_, consisting of simple properties. To use more advanced properties that consist of multiple values, the property itself can be defined using an array, for example, the _record_ and _allowlist_key_ properties for the _record_modifier_ _filter_:
+
+```yaml
+pipeline:
+ inputs:
+ - name: tail
+ tag: syslog
+ path: /var/log/syslog
+ filters:
+ - name: record_modifier
+ match: syslog
+ record:
+ - powered_by calyptia
+ - name: record_modifier
+ match: syslog
+ allowlist_key:
+ - powered_by
+ - message
+```
+
+In cases where an entry in a list requires two values, they must be separated by a space, such as in the _record_ property for the _record_modifier_ filter.
+
### Input
An _input_ section defines a source (related to an input plugin). Here we will describe the base configuration for each _input_ section. Note that each input plugin may add it own configuration keys:
@@ -95,7 +151,7 @@ An _input_ section defines a source (related to an input plugin). Here we will d
| Tag | Tag name associated to all records coming from this plugin. |
| Log_Level | Set the plugin's logging verbosity level. Allowed values are: off, error, warn, info, debug and trace. Defaults to the _SERVICE_ section's _Log_Level._ |
-The _Name_ is mandatory and it let Fluent Bit know which input plugin should be loaded. The _Tag_ is mandatory for all plugins except for the _input forward_ plugin (as it provides dynamic tags).
+The _Name_ is mandatory and it lets Fluent Bit know which input plugin should be loaded. The _Tag_ is mandatory for all plugins except for the _input forward_ plugin (as it provides dynamic tags).
#### Example input
@@ -110,16 +166,16 @@ pipeline:
### Filter
-A _filter_ section defines a filter (related to an filter plugin). Here we will describe the base configuration for each _filter_ section. Note that each filter plugin may add it own configuration keys:
+A _filter_ section defines a filter (related to a filter plugin). Here we will describe the base configuration for each _filter_ section. Note that each filter plugin may add its own configuration keys:
-| Key | Description |
-|------------ |-------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| Name | Name of the filter plugin. Defined as a subsection of the _filters_ section. |
-| Match | A pattern to match against the tags of incoming records. It's case sensitive and support the star (\*) character as a wildcard. |
-| Match_Regex | A regular expression to match against the tags of incoming records. Use this option if you want to use the full regex syntax. |
+| Key | Description |
+| ----------- | ------------------------------------------------------------ |
+| Name | Name of the filter plugin. Defined as a subsection of the _filters_ section. |
+| Match | A pattern to match against the tags of incoming records. It's case-sensitive and supports the star (\*) character as a wildcard. |
+| Match_Regex | A regular expression to match against the tags of incoming records. Use this option if you want to use the full regex syntax. |
| Log_Level | Set the plugin's logging verbosity level. Allowed values are: off, error, warn, info, debug and trace. Defaults to the _SERVICE_ section's _Log_Level._ |
-The _Name_ is mandatory and it let Fluent Bit know which filter plugin should be loaded. The _Match_ or _Match_Regex_ is mandatory for all plugins. If both are specified, _Match_Regex_ takes precedence.
+The _Name_ is mandatory and it lets Fluent Bit know which filter plugin should be loaded. The _Match_ or _Match_Regex_ is mandatory for all plugins. If both are specified, _Match_Regex_ takes precedence.
#### Example filter
@@ -135,14 +191,14 @@ pipeline:
### Output
-The _outputs_ section specify a destination that certain records should follow after a Tag match. Currently, Fluent Bit can route up to 256 _OUTPUT_ plugins. The configuration support the following keys:
+The _outputs_ section specify a destination that certain records should follow after a Tag match. Currently, Fluent Bit can route up to 256 _OUTPUT_ plugins. The configuration supports the following keys:
-| Key | Description |
-| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| Name | Name of the output plugin. Defined as a subsection of the _outputs_ section. |
-| Match | A pattern to match against the tags of incoming records. It's case sensitive and support the star (\*) character as a wildcard. |
-| Match_Regex | A regular expression to match against the tags of incoming records. Use this option if you want to use the full regex syntax. |
-| Log_Level | Set the plugin's logging verbosity level. Allowed values are: off, error, warn, info, debug and trace. Defaults to the _SERVICE_ section's _Log_Level._ |
+| Key | Description |
+| ----------- | ------------------------------------------------------------ |
+| Name | Name of the output plugin. Defined as a subsection of the _outputs_ section. |
+| Match | A pattern to match against the tags of incoming records. It's case-sensitive and supports the star (\*) character as a wildcard. |
+| Match_Regex | A regular expression to match against the tags of incoming records. Use this option if you want to use the full regex syntax. |
+| Log_Level | Set the plugin's logging verbosity level. Allowed values are: off, error, warn, info, debug and trace. The output log level defaults to the _SERVICE_ section's _Log_Level._ |
#### Example output
@@ -173,3 +229,53 @@ pipeline:
- name: stdout
match: 'my*cpu'
```
+
+## Processors
+
+In recent versions of Fluent Bit, the input and output plugins can run in separate threads. In Fluent Bit 2.1.2, we have implemented a new interface called "processor" to extend the processing capabilities in input and output plugins directly without routing the data. This interface allows users to apply data transformations and filtering to incoming data records before they are processed further in the pipeline.
+
+This functionality is only exposed in YAML configuration and not in classic configuration mode due to the restriction of nested levels of configuration.
+
+[Processor example](configuration-file.md#example-using-processors)
+
+### Example: Using processors
+
+The following configuration file example demonstrates the use of processors to change the log record in the input plugin section by adding a new key "hostname" with the value "monox", and using Lua to append the tag to the log record. Also, in the output plugin section, a new key named "output" is added with the value "new data". All of this happens without routing the logs further in the pipeline.
+
+```yaml
+ service:
+ log_level: info
+ http_server: on
+ http_listen: 0.0.0.0
+ http_port: 2021
+ pipeline:
+ inputs:
+ - name: random
+ tag: test-tag
+ interval_sec: 1
+ processors:
+ logs:
+ - name: modify
+ add: hostname monox
+ - name: lua
+ call: append_tag
+ code: |
+ function append_tag(tag, timestamp, record)
+ new_record = record
+ new_record["tag"] = tag
+ return 1, timestamp, new_record
+ end
+ outputs:
+ - name: stdout
+ match: '*'
+ processors:
+ logs:
+ - name: lua
+ call: add_field
+ code: |
+ function add_field(tag, timestamp, record)
+ new_record = record
+ new_record["output"] = "new data"
+ return 1, timestamp, new_record
+ end
+```
diff --git a/administration/hot-reload.md b/administration/hot-reload.md
new file mode 100644
index 000000000..5a60b21c6
--- /dev/null
+++ b/administration/hot-reload.md
@@ -0,0 +1,65 @@
+---
+description: Enable hot reload through SIGHUP signal or an HTTP endpoint
+---
+
+# Hot Reload
+
+Fluent Bit supports the hot reloading feature when enabled via the configuration file or command line with the `-Y` or `--enable-hot-reload` option.
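+
+For example, assuming a configuration file at `/etc/fluent-bit/fluent-bit.conf` (the path is illustrative), hot reload support can be enabled from the command line as:
+
+```text
+$ fluent-bit -c /etc/fluent-bit/fluent-bit.conf -Y
+```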
+
+## Getting Started
+
+To get started with reloading via HTTP, the first step is to enable the HTTP Server from the configuration file:
+
+```toml
+[SERVICE]
+ HTTP_Server On
+ HTTP_Listen 0.0.0.0
+ HTTP_PORT 2020
+ Hot_Reload On
+...
+```
+
+The above configuration snippet will enable the HTTP endpoint for hot reloading.
+
+## How to reload
+
+### Via HTTP
+
+Hot reloading can be triggered via the following HTTP endpoints:
+
+* `PUT /api/v2/reload`
+* `POST /api/v2/reload`
+
+If the hot reloading feature is not enabled, reloading via these endpoints will not work.
+
+To reload Fluent Bit with curl, specify an empty request body as follows:
+
+```text
+$ curl -X POST -d '{}' localhost:2020/api/v2/reload
+```
+
+### Via Signal
+
+Hot reloading can also be triggered via the `SIGHUP` signal.
+
+The `SIGHUP` signal is not supported on Windows, so this reload method is not available there.
+
+## How to confirm whether Fluent Bit was reloaded
+
+### Via HTTP
+
+The hot reload count can be obtained via the following HTTP endpoint:
+
+* `GET /api/v2/reload`
+
+The endpoint returns the hot reload count as follows:
+
+```json
+{"hot_reload_count":3}
+```
+
+The default value of that number is 0.
+
+## Limitations
+
+The hot reloading feature currently works on Linux, macOS, and Windows. On Windows, only the HTTP endpoints can be used, since `SIGHUP` is not supported.
diff --git a/administration/monitoring.md b/administration/monitoring.md
index c95501288..59ea80531 100644
--- a/administration/monitoring.md
+++ b/administration/monitoring.md
@@ -92,9 +92,14 @@ Fluent Bit aims to expose useful interfaces for monitoring, as of Fluent Bit v0.
| /api/v1/metrics/prometheus | Internal metrics per loaded plugin ready to be consumed by a Prometheus Server | Prometheus Text 0.0.4 |
| /api/v1/storage | Get internal metrics of the storage layer / buffered data. This option is enabled only if in the `SERVICE` section the property `storage.metrics` has been enabled | JSON |
| /api/v1/health | Fluent Bit health check result | String |
+| /api/v2/metrics | Internal metrics per loaded plugin | [cmetrics text format](https://github.com/fluent/cmetrics) |
+| /api/v2/metrics/prometheus | Internal metrics per loaded plugin ready to be consumed by a Prometheus Server | Prometheus Text 0.0.4 |
+| /api/v2/reload | Execute hot reloading or get the status of hot reloading. For more details, please refer to the [hot-reloading documentation](hot-reload.md). | JSON |
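+
+For example, assuming the HTTP server is enabled on the default port, the v2 metrics endpoints can be queried with curl (adjust the host and port to your setup):
+
+```text
+$ curl -s http://127.0.0.1:2020/api/v2/metrics/prometheus
+```
+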
### Metric Descriptions
+#### For v1 metrics
+
The following are detailed descriptions for the metrics outputted in prometheus format by `/api/v1/metrics/prometheus`.
The following definitions are key to understand:
@@ -137,6 +142,58 @@ The following are detailed descriptions for the metrics outputted in JSON format
| input_chunks.{plugin name}.chunks.busy | "Busy" chunks are chunks that are being processed/sent by outputs and are not eligible to have new data appended. | chunks |
| input_chunks.{plugin name}.chunks.busy_size | The sum of the byte size of each chunk which is currently marked as busy. | bytes |
+#### For v2 metrics
+
+The following are detailed descriptions for the metrics outputted by `/api/v2/metrics/prometheus` (Prometheus format) or `/api/v2/metrics` (cmetrics text format).
+
+The following definitions are key to understand:
+* record: a single message collected from a source, such as a single long line in a file.
+* chunk: Fluent Bit input plugin instances ingest log records and store them in chunks. A batch of records in a chunk is tracked together as a single unit; the Fluent Bit engine attempts to fit records into chunks of at most 2 MB, but the size can vary at runtime. Chunks are then sent to an output. An output plugin instance can either successfully send the full chunk to the destination and mark it as successful, or it can fail the chunk entirely if an unrecoverable error is encountered, or it can ask for the chunk to be retried.
+
+| Metric Name | Labels | Description | Type | Unit |
+|--------------------------------------------|-------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|---------|
+| fluentbit\_input\_bytes\_total | name: the name or alias for the input instance | The number of bytes of log records that this input instance has successfully ingested | counter | bytes |
+| fluentbit\_input\_records\_total | name: the name or alias for the input instance | The number of log records this input has successfully ingested | counter | records |
+| fluentbit\_filter\_bytes\_total | name: the name or alias for the filter instance | The number of bytes of log records that this filter instance has successfully ingested | counter | bytes |
+| fluentbit\_filter\_records\_total | name: the name or alias for the filter instance | The number of log records this filter has successfully ingested | counter | records |
+| fluentbit\_filter\_added\_records\_total | name: the name or alias for the filter instance | The number of log records that have been added by the filter. This means they were added to the data pipeline. | counter | records |
+| fluentbit\_filter\_dropped\_records\_total | name: the name or alias for the filter instance | The number of log records that have been dropped by the filter. This means they were removed from the data pipeline. | counter | records |
+| fluentbit\_output\_dropped\_records\_total | name: the name or alias for the output instance | The number of log records that have been dropped by the output. This means they encountered an unrecoverable error, or retries expired for their chunk. | counter | records |
+| fluentbit\_output\_errors\_total | name: the name or alias for the output instance | The number of chunks that have faced an error (either unrecoverable or retriable). This is the number of times a chunk has failed, and does not correspond with the number of error messages you see in the Fluent Bit log output. | counter | chunks |
+| fluentbit\_output\_proc\_bytes\_total | name: the name or alias for the output instance | The number of bytes of log records that this output instance has *successfully* sent. This is the total byte size of all unique chunks sent by this output. If a record is not sent due to some error, then it will not count towards this metric. | counter | bytes |
+| fluentbit\_output\_proc\_records\_total | name: the name or alias for the output instance | The number of log records that this output instance has *successfully* sent. This is the total record count of all unique chunks sent by this output. If a record is not successfully sent, it does not count towards this metric. | counter | records |
+| fluentbit\_output\_retried\_records\_total | name: the name or alias for the output instance | The number of log records that experienced a retry. Note that this is calculated at the chunk level; the count is increased when an entire chunk is marked for retry. An output plugin may or may not perform multiple actions that generate many error messages when uploading a single chunk. | counter | records |
+| fluentbit\_output\_retries\_failed\_total | name: the name or alias for the output instance | The number of times that retries expired for a chunk. Each plugin configures a Retry\_Limit which applies to chunks. Once the Retry\_Limit has been reached for a chunk it is discarded and this metric is incremented. | counter | chunks |
+| fluentbit\_output\_retries\_total | name: the name or alias for the output instance | The number of times this output instance requested a retry for a chunk. | counter | chunks |
+| fluentbit\_uptime | hostname: the hostname of the machine running fluent-bit | The number of seconds that Fluent Bit has been running. | counter | seconds |
+| fluentbit\_process\_start\_time\_seconds | hostname: the hostname of the machine running fluent-bit | The Unix Epoch time stamp for when Fluent Bit started. | gauge | seconds |
+| fluentbit\_build\_info | hostname: the hostname, version: the version of fluent-bit, os: OS type | Build version information. The value is the Unix Epoch time stamp recorded when the configuration context was initialized. | gauge | seconds |
+| fluentbit\_hot\_reloaded\_times | hostname: the hostname of the machine running fluent-bit | The number of times the configuration has been hot reloaded. | gauge | times |
+
+The following are detailed descriptions of the metrics collected by the storage layer.
+
+| Metric Name | Labels | Description | Type | Unit |
+|-------------------------------------------------|-------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|---------|
+| fluentbit\_input\_chunks.storage\_chunks | None | The total number of chunks of records that Fluent Bit is currently buffering | gauge | chunks |
+| fluentbit\_storage\_mem\_chunk | None | The total number of chunks that are buffered in memory at this time. Note that chunks can be both in memory and on the file system at the same time. | gauge | chunks |
+| fluentbit\_storage\_fs\_chunks | None | The total number of chunks saved to the filesystem. | gauge | chunks |
+| fluentbit\_storage\_fs\_chunks\_up | None | A chunk is "up" if it is in memory. So this is the count of chunks that are both in filesystem and in memory. | gauge | chunks |
+| fluentbit\_storage\_fs\_chunks\_down | None | The count of chunks that are "down" and thus are only in the filesystem. | gauge | chunks |
+| fluentbit\_storage\_fs\_chunks\_busy | None | The total number of chunks that are in a busy state. | gauge | chunks |
+| fluentbit\_storage\_fs\_chunks\_busy\_bytes | None | The total byte size of the chunks that are in a busy state. | gauge | bytes |
+| | | | | |
+| fluentbit\_input\_storage\_overlimit | name: the name or alias for the input instance | Is this input instance over its configured Mem\_Buf\_Limit? | gauge | boolean |
+| fluentbit\_input\_storage\_memory\_bytes | name: the name or alias for the input instance | The size of memory that this input is consuming to buffer logs in chunks. | gauge | bytes |
+| | | | | |
+| fluentbit\_input\_storage\_chunks | name: the name or alias for the input instance | The current total number of chunks owned by this input instance. | gauge | chunks |
+| fluentbit\_input\_storage\_chunks\_up | name: the name or alias for the input instance | The current number of chunks that are "up" in memory for this input. Chunks that are "up" will also be in the filesystem layer as well if filesystem storage is enabled. | gauge | chunks |
+| fluentbit\_input\_storage\_chunks\_down | name: the name or alias for the input instance | The current number of chunks that are "down" in the filesystem for this input. | gauge | chunks |
+| fluentbit\_input\_storage\_chunks\_busy | name: the name or alias for the input instance | "Busy" chunks are chunks that are being processed/sent by outputs and are not eligible to have new data appended. | gauge | chunks |
+| fluentbit\_input\_storage\_chunks\_busy\_bytes | name: the name or alias for the input instance | The sum of the byte size of each chunk which is currently marked as busy. | gauge | bytes |
+| | | | | |
+| fluentbit\_output\_upstream\_total\_connections | name: the name or alias for the output instance | The sum of the connection count of each output plugin. | gauge | connections |
+| fluentbit\_output\_upstream\_busy\_connections | name: the name or alias for the output instance | The sum of the busy connection count of each output plugin. | gauge | connections |
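+
+As a quick way to inspect these metrics (assuming the built-in HTTP server is enabled and listening on the default port 2020), both v2 endpoints can be queried with `curl`:
+
+```bash
+# cmetrics text format
+curl -s http://127.0.0.1:2020/api/v2/metrics
+
+# Prometheus text format
+curl -s http://127.0.0.1:2020/api/v2/metrics/prometheus
+```
+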
### Uptime Example
@@ -322,9 +379,9 @@ If (HC_Errors_Count > 5) OR (HC_Retry_Failure_Count > 5) IN 5 seconds is TRUE, t
If (HC_Errors_Count > 5) OR (HC_Retry_Failure_Count > 5) IN 5 seconds is FALSE, then it's healthy.
-## Calyptia Cloud
+## Calyptia
-[Calyptia Cloud](https://cloud.calyptia.com) is a hosted service that allows you to monitor your Fluent Bit agents including data flow, metrics and configurations.
+[Calyptia](https://calyptia.com/free-trial) is a hosted service that allows you to monitor your Fluent Bit agents, including data flow, metrics, and configurations.
![](../.gitbook/assets/image-19-.png)
@@ -332,8 +389,8 @@ If (HC_Errors_Count > 5) OR (HC_Retry_Failure_Count > 5) IN 5 seconds is FALSE,
Register your Fluent Bit agent will take **less than one minute**, steps:
-* Go to [cloud.calyptia.com](https://cloud.calyptia.com) and sign-in
-* On the left menu click on [Settings](https://cloud.calyptia.com/settings) and generate/copy your API key
+* Go to the Calyptia Core console and sign in
+* On the left menu, click on Settings and generate/copy your API key
In your Fluent Bit configuration file, append the following configuration section:
diff --git a/administration/networking.md b/administration/networking.md
index a7c4e88eb..8179e6d3c 100644
--- a/administration/networking.md
+++ b/administration/networking.md
@@ -26,7 +26,7 @@ TCP is a _connected oriented_ channel, to deliver and receive data from a remote
The concept of `Connection Keepalive` refers to the ability of the client \(Fluent Bit on this case\) to keep the TCP connection open in a persistent way, that means that once the connection is created and used, instead of close it, it can be recycled. This feature offers many benefits in terms of performance since communication channels are always established before hand.
-Any component that uses TCP channels like HTTP or [TLS](security.md), can take advantage of this feature. For configuration purposes use the `net.keepalive` property.
+Any component that uses TCP channels like HTTP or [TLS](transport-security.md), can take advantage of this feature. For configuration purposes use the `net.keepalive` property.
### Connection Keepalive Idle Timeout
@@ -38,21 +38,31 @@ In order to control how long a keepalive connection can be idle, we expose the c
If a transport layer protocol is specified, the plugin whose configuration section the `net.dns.mode` setting is specified on overrides the global `dns.mode` value and issues DNS requests using the specified protocol which can be either TCP or UDP
+### Max Connections Per Worker
+
+By default, Fluent Bit tries to deliver data as fast as possible and creates TCP connections on demand, in keepalive mode, for performance reasons. In highly scalable environments, you might want to limit how many connections are made in parallel.
+
+This can be done with the configuration property `net.max_worker_connections`, which can be used in the output plugin sections.
+This feature acts at the worker level; for example, if you have 5 workers and `net.max_worker_connections` is set to 10, a maximum of 50 connections is allowed.
+If the limit is reached, the output plugin will issue a retry.
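+
+As an illustrative sketch (the output plugin and the host/port values below are arbitrary examples), the property is set in an output section alongside `Workers`:
+
+```text
+[OUTPUT]
+    Name                       http
+    Match                      *
+    Host                       192.168.2.3
+    Port                       80
+    Workers                    5
+    net.max_worker_connections 10
+```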
+
+
## Configuration Options
For plugins that rely on networking I/O, the following section describes the network configuration properties available and how they can be used to optimize performance or adjust to different configuration needs:
-| Property | Description | Default |
-| :--- | :--- | :--- |
-| `net.connect_timeout` | Set maximum time expressed in seconds to wait for a TCP connection to be established, this include the TLS handshake time. | 10 |
-| `net.connect_timeout_log_error` | On connection timeout, specify if it should log an error. When disabled, the timeout is logged as a debug message. | true |
-| `net.dns.mode` | Select the primary DNS connection type (TCP or UDP). Can be set in the [SERVICE] section and overridden on a per plugin basis if desired. | |
-| `net.dns.prefer_ipv4` | Prioritize IPv4 DNS results when trying to establish a connection. | false |
-| `net.dns.resolver`| Select the primary DNS resolver type (LEGACY or ASYNC). | |
-| `net.keepalive` | Enable or disable connection keepalive support. Accepts a boolean value: on / off. | on |
-| `net.keepalive_idle_timeout` | Set maximum time expressed in seconds for an idle keepalive connection. | 30 |
-| `net.keepalive_max_recycle` | Set maximum number of times a keepalive connection can be used before it is retired. | 2000 |
-| `net.source_address` | Specify network address to bind for data traffic. | |
+| Property | Description | Default |
+| :--- |:------------------------------------------------------------------------------------------------------------------------------------------|:--------------|
+| `net.connect_timeout` | Set maximum time expressed in seconds to wait for a TCP connection to be established, this includes the TLS handshake time. | 10 |
+| `net.connect_timeout_log_error` | On connection timeout, specify if it should log an error. When disabled, the timeout is logged as a debug message. | true |
+| `net.dns.mode` | Select the primary DNS connection type (TCP or UDP). Can be set in the [SERVICE] section and overridden on a per plugin basis if desired. | |
+| `net.dns.prefer_ipv4` | Prioritize IPv4 DNS results when trying to establish a connection. | false |
+| `net.dns.resolver`| Select the primary DNS resolver type (LEGACY or ASYNC). | |
+| `net.keepalive` | Enable or disable connection keepalive support. Accepts a boolean value: on / off. | on |
+| `net.keepalive_idle_timeout` | Set maximum time expressed in seconds for an idle keepalive connection. | 30 |
+| `net.keepalive_max_recycle` | Set maximum number of times a keepalive connection can be used before it is retired. | 2000 |
+| `net.max_worker_connections` | Set maximum number of TCP connections that can be established per worker. | 0 (unlimited) |
+| `net.source_address` | Specify network address to bind for data traffic. | |
## Example
diff --git a/administration/security.md b/administration/transport-security.md
similarity index 80%
rename from administration/security.md
rename to administration/transport-security.md
index 909adeda2..cc06f4125 100644
--- a/administration/security.md
+++ b/administration/transport-security.md
@@ -1,5 +1,5 @@
-# Security
+# Transport Security
Fluent Bit provides integrated support for _Transport Layer Security_ \(TLS\) and it predecessor _Secure Sockets Layer_ \(SSL\) respectively. In this section we will refer as TLS only for both implementations.
@@ -23,32 +23,51 @@ The listed properties can be enabled in the configuration file, specifically on
The following **output** plugins can take advantage of the TLS feature:
-* [Amazon CloudWatch](../pipeline/outputs/cloudwatch.md)
-* [Amazon Kinesis Data Firehose](../pipeline/outputs/firehose.md)
-* [Amazon Kinesis Data Streams](../pipeline/outputs/kinesis.md)
* [Amazon S3](../pipeline/outputs/s3.md)
+* [Apache SkyWalking](../pipeline/outputs/skywalking.md)
* [Azure](../pipeline/outputs/azure.md)
+* [Azure Blob](../pipeline/outputs/azure_blob.md)
+* [Azure Data Explorer (Kusto)](../pipeline/outputs/azure_kusto.md)
+* [Azure Logs Ingestion API](../pipeline/outputs/azure_logs_ingestion.md)
* [BigQuery](../pipeline/outputs/bigquery.md)
* [Datadog](../pipeline/outputs/datadog.md)
* [Elasticsearch](../pipeline/outputs/elasticsearch.md)
* [Forward](../pipeline/outputs/forward.md)
* [GELF](../pipeline/outputs/gelf.md)
+* [Google Chronicle](../pipeline/outputs/chronicle.md)
* [HTTP](../pipeline/outputs/http.md)
* [InfluxDB](../pipeline/outputs/influxdb.md)
* [Kafka REST Proxy](../pipeline/outputs/kafka-rest-proxy.md)
+* [LogDNA](../pipeline/outputs/logdna.md)
* [Loki](../pipeline/outputs/loki.md)
+* [New Relic](../pipeline/outputs/new-relic.md)
+* [OpenSearch](../pipeline/outputs/opensearch.md)
+* [OpenTelemetry](../pipeline/outputs/opentelemetry.md)
+* [Oracle Cloud Infrastructure Logging Analytics](../pipeline/outputs/oci-logging-analytics.md)
+* [Prometheus Remote Write](../pipeline/outputs/prometheus-remote-write.md)
* [Slack](../pipeline/outputs/slack.md)
* [Splunk](../pipeline/outputs/splunk.md)
* [Stackdriver](../pipeline/outputs/stackdriver.md)
+* [Syslog](../pipeline/outputs/syslog.md)
* [TCP & TLS](../pipeline/outputs/tcp-and-tls.md)
* [Treasure Data](../pipeline/outputs/treasure-data.md)
+* [WebSocket](../pipeline/outputs/websocket.md)
The following **input** plugins can take advantage of the TLS feature:
-* [MQTT](../pipeline/inputs/mqtt.md)
-* [TCP](../pipeline/inputs/tcp.md)
+* [Docker Events](../pipeline/inputs/docker-events.md)
+* [Elasticsearch (Bulk API)](../pipeline/inputs/elasticsearch.md)
+* [Forward](../pipeline/inputs/forward.md)
+* [Health](../pipeline/inputs/health.md)
* [HTTP](../pipeline/inputs/http.md)
+* [Kubernetes Events](../pipeline/inputs/kubernetes-events.md)
+* [MQTT](../pipeline/inputs/mqtt.md)
+* [NGINX Exporter Metrics](../pipeline/inputs/nginx.md)
* [OpenTelemetry](../pipeline/inputs/opentelemetry.md)
+* [Prometheus Scrape Metrics](../pipeline/inputs/prometheus-scrape-metrics.md)
+* [Splunk (HTTP HEC)](../pipeline/inputs/splunk.md)
+* [Syslog](../pipeline/inputs/syslog.md)
+* [TCP](../pipeline/inputs/tcp.md)
In addition, other plugins implements a sub-set of TLS support, meaning, with restricted configuration:
diff --git a/administration/troubleshooting.md b/administration/troubleshooting.md
index fa48adcef..cae5ad2ce 100644
--- a/administration/troubleshooting.md
+++ b/administration/troubleshooting.md
@@ -8,19 +8,128 @@
Tap can be used to generate events or records detailing what messages
pass through Fluent Bit, at what time and what filters affect them.
-
### Simple example
First, we will make sure that the container image we are going to use actually supports Fluent Bit Tap (available in Fluent Bit 2.0+):
```shell
-$ docker run --rm -ti fluent/fluent-bit:latest --help | grep -Z
- -Z, --enable-chunk-trace enable chunk tracing. activating it requires using the HTTP Server API.
+$ docker run --rm -ti fluent/fluent-bit:latest --help | grep trace
+ -Z, --enable-chunk-trace     enable chunk tracing, it can be activated either through the http api or the command line
+ --trace-input input to start tracing on startup.
+ --trace-output output to use for tracing on startup.
+ --trace-output-property set a property for output tracing on startup.
+ --trace setup a trace pipeline on startup. Uses a single line, ie: "input=dummy.0 output=stdout output.format='json'"
```
If the `--enable-chunk-trace` option is present it means Fluent Bit has support for Fluent Bit Tap but it is disabled by default, so remember to enable it with this option.
-Tap support is enabled and disabled via the embedded web server, so enable it like so (or the equivalent option in the configuration file):
+You can start fluent-bit with tracing activated from the beginning by using the `--trace-input` and `--trace-output` options, like so:
+
+```bash
+$ fluent-bit -Z -i dummy -o stdout -f 1 --trace-input=dummy.0 --trace-output=stdout
+Fluent Bit v2.1.8
+* Copyright (C) 2015-2022 The Fluent Bit Authors
+* Fluent Bit is a CNCF sub-project under the umbrella of Fluentd
+* https://fluentbit.io
+
+[2023/07/21 16:27:01] [ info] [fluent bit] version=2.1.8, commit=824ba3dd08, pid=622937
+[2023/07/21 16:27:01] [ info] [storage] ver=1.4.0, type=memory, sync=normal, checksum=off, max_chunks_up=128
+[2023/07/21 16:27:01] [ info] [cmetrics] version=0.6.3
+[2023/07/21 16:27:01] [ info] [ctraces ] version=0.3.1
+[2023/07/21 16:27:01] [ info] [input:dummy:dummy.0] initializing
+[2023/07/21 16:27:01] [ info] [input:dummy:dummy.0] storage_strategy='memory' (memory only)
+[2023/07/21 16:27:01] [ info] [sp] stream processor started
+[2023/07/21 16:27:01] [ info] [output:stdout:stdout.0] worker #0 started
+[2023/07/21 16:27:01] [ info] [fluent bit] version=2.1.8, commit=824ba3dd08, pid=622937
+[2023/07/21 16:27:01] [ info] [storage] ver=1.4.0, type=memory, sync=normal, checksum=off, max_chunks_up=128
+[2023/07/21 16:27:01] [ info] [cmetrics] version=0.6.3
+[2023/07/21 16:27:01] [ info] [ctraces ] version=0.3.1
+[2023/07/21 16:27:01] [ info] [input:emitter:trace-emitter] initializing
+[2023/07/21 16:27:01] [ info] [input:emitter:trace-emitter] storage_strategy='memory' (memory only)
+[2023/07/21 16:27:01] [ info] [sp] stream processor started
+[2023/07/21 16:27:01] [ info] [output:stdout:stdout.0] worker #0 started
+.[0] dummy.0: [[1689971222.068537501, {}], {"message"=>"dummy"}]
+[0] dummy.0: [[1689971223.068556121, {}], {"message"=>"dummy"}]
+[0] trace: [[1689971222.068677045, {}], {"type"=>1, "trace_id"=>"0", "plugin_instance"=>"dummy.0", "records"=>[{"timestamp"=>1689971222, "record"=>{"message"=>"dummy"}}], "start_time"=>1689971222, "end_time"=>1689971222}]
+[1] trace: [[1689971222.068735577, {}], {"type"=>3, "trace_id"=>"0", "plugin_instance"=>"dummy.0", "records"=>[{"timestamp"=>1689971222, "record"=>{"message"=>"dummy"}}], "start_time"=>1689971222, "end_time"=>1689971222}]
+[0] dummy.0: [[1689971224.068586317, {}], {"message"=>"dummy"}]
+[0] trace: [[1689971223.068626923, {}], {"type"=>1, "trace_id"=>"1", "plugin_instance"=>"dummy.0", "records"=>[{"timestamp"=>1689971223, "record"=>{"message"=>"dummy"}}], "start_time"=>1689971223, "end_time"=>1689971223}]
+[1] trace: [[1689971223.068675735, {}], {"type"=>3, "trace_id"=>"1", "plugin_instance"=>"dummy.0", "records"=>[{"timestamp"=>1689971223, "record"=>{"message"=>"dummy"}}], "start_time"=>1689971223, "end_time"=>1689971223}]
+[2] trace: [[1689971224.068689341, {}], {"type"=>1, "trace_id"=>"2", "plugin_instance"=>"dummy.0", "records"=>[{"timestamp"=>1689971224, "record"=>{"message"=>"dummy"}}], "start_time"=>1689971224, "end_time"=>1689971224}]
+[3] trace: [[1689971224.068747182, {}], {"type"=>3, "trace_id"=>"2", "plugin_instance"=>"dummy.0", "records"=>[{"timestamp"=>1689971224, "record"=>{"message"=>"dummy"}}], "start_time"=>1689971224, "end_time"=>1689971224}]
+^C[2023/07/21 16:27:05] [engine] caught signal (SIGINT)
+[2023/07/21 16:27:05] [ warn] [engine] service will shutdown in max 5 seconds
+[2023/07/21 16:27:05] [ info] [input] pausing dummy.0
+[0] dummy.0: [[1689971225.068568875, {}], {"message"=>"dummy"}]
+[2023/07/21 16:27:06] [ info] [engine] service has stopped (0 pending tasks)
+[2023/07/21 16:27:06] [ info] [input] pausing dummy.0
+[2023/07/21 16:27:06] [ warn] [engine] service will shutdown in max 1 seconds
+[0] trace: [[1689971225.068654038, {}], {"type"=>1, "trace_id"=>"3", "plugin_instance"=>"dummy.0", "records"=>[{"timestamp"=>1689971225, "record"=>{"message"=>"dummy"}}], "start_time"=>1689971225, "end_time"=>1689971225}]
+[1] trace: [[1689971225.068695829, {}], {"type"=>3, "trace_id"=>"3", "plugin_instance"=>"dummy.0", "records"=>[{"timestamp"=>1689971225, "record"=>{"message"=>"dummy"}}], "start_time"=>1689971225, "end_time"=>1689971225}]
+[2023/07/21 16:27:07] [ info] [engine] service has stopped (0 pending tasks)
+[2023/07/21 16:27:07] [ info] [output:stdout:stdout.0] thread worker #0 stopping...
+[2023/07/21 16:27:07] [ info] [output:stdout:stdout.0] thread worker #0 stopped
+[2023/07/21 16:27:07] [ info] [output:stdout:stdout.0] thread worker #0 stopping...
+[2023/07/21 16:27:07] [ info] [output:stdout:stdout.0] thread worker #0 stopped
+```
+
+If you see the following warning then the `-Z` or `--enable-chunk-trace` option is missing:
+
+```bash
+[2023/07/21 16:26:42] [ warn] [chunk trace] enable chunk tracing via the configuration or command line to be able to activate tracing.
+```
+
+Properties can be set for the output using the `--trace-output-property` option:
+
+```bash
+$ fluent-bit -Z -i dummy -o stdout -f 1 --trace-input=dummy.0 --trace-output=stdout --trace-output-property=format=json_lines
+Fluent Bit v2.1.8
+* Copyright (C) 2015-2022 The Fluent Bit Authors
+* Fluent Bit is a CNCF sub-project under the umbrella of Fluentd
+* https://fluentbit.io
+
+[2023/07/21 16:28:59] [ info] [fluent bit] version=2.1.8, commit=824ba3dd08, pid=623170
+[2023/07/21 16:28:59] [ info] [storage] ver=1.4.0, type=memory, sync=normal, checksum=off, max_chunks_up=128
+[2023/07/21 16:28:59] [ info] [cmetrics] version=0.6.3
+[2023/07/21 16:28:59] [ info] [ctraces ] version=0.3.1
+[2023/07/21 16:28:59] [ info] [input:dummy:dummy.0] initializing
+[2023/07/21 16:28:59] [ info] [input:dummy:dummy.0] storage_strategy='memory' (memory only)
+[2023/07/21 16:28:59] [ info] [sp] stream processor started
+[2023/07/21 16:28:59] [ info] [output:stdout:stdout.0] worker #0 started
+[2023/07/21 16:28:59] [ info] [fluent bit] version=2.1.8, commit=824ba3dd08, pid=623170
+[2023/07/21 16:28:59] [ info] [storage] ver=1.4.0, type=memory, sync=normal, checksum=off, max_chunks_up=128
+[2023/07/21 16:28:59] [ info] [cmetrics] version=0.6.3
+[2023/07/21 16:28:59] [ info] [ctraces ] version=0.3.1
+[2023/07/21 16:28:59] [ info] [input:emitter:trace-emitter] initializing
+[2023/07/21 16:28:59] [ info] [input:emitter:trace-emitter] storage_strategy='memory' (memory only)
+[2023/07/21 16:29:00] [ info] [sp] stream processor started
+[2023/07/21 16:29:00] [ info] [output:stdout:stdout.0] worker #0 started
+.[0] dummy.0: [[1689971340.068565891, {}], {"message"=>"dummy"}]
+[0] dummy.0: [[1689971341.068632477, {}], {"message"=>"dummy"}]
+{"date":1689971340.068745,"type":1,"trace_id":"0","plugin_instance":"dummy.0","records":[{"timestamp":1689971340,"record":{"message":"dummy"}}],"start_time":1689971340,"end_time":1689971340}
+{"date":1689971340.068825,"type":3,"trace_id":"0","plugin_instance":"dummy.0","records":[{"timestamp":1689971340,"record":{"message":"dummy"}}],"start_time":1689971340,"end_time":1689971340}
+[0] dummy.0: [[1689971342.068613646, {}], {"message"=>"dummy"}]
+```
+
+With those options set, the stdout plugin now emits traces in `json_lines` format:
+
+```json
+{"date":1689971340.068745,"type":1,"trace_id":"0","plugin_instance":"dummy.0","records":[{"timestamp":1689971340,"record":{"message":"dummy"}}],"start_time":1689971340,"end_time":1689971340}
+```
+
+All three options can also be defined using the much more flexible `--trace` option:
+
+```bash
+$ fluent-bit -Z -i dummy -o stdout -f 1 --trace="input=dummy.0 output=stdout output.format=json_lines"
+```
+
+We defined the entire Tap pipeline with the configuration `input=dummy.0 output=stdout output.format=json_lines`, which specifies the following:
+
+ * input: dummy.0 (listens to the tag and/or alias `dummy.0`)
+ * output: stdout (outputs to a stdout plugin)
+ * output.format: json_lines (sets the stdout format to `json_lines`)
+
+Tap support can also be activated and deactivated via the embedded web server:
```shell
$ docker run --rm -ti -p 2020:2020 fluent/fluent-bit:latest -Z -H -i dummy -p alias=input_dummy -o stdout -f 1
diff --git a/concepts/data-pipeline/router.md b/concepts/data-pipeline/router.md
index 416bdf8cc..0041c992e 100644
--- a/concepts/data-pipeline/router.md
+++ b/concepts/data-pipeline/router.md
@@ -60,3 +60,25 @@ Routing is flexible enough to support _wildcard_ in the **Match** pattern. The b
```
The match rule is set to **my\_\*** which means it will match any Tag that starts with **my\_**.
+
+## Routing with Regex
+
+Routing also provides support for _regex_ with the **Match_Regex** pattern, allowing for more complex and precise matching criteria.
+The following example demonstrates how to route data from sources based on a regular expression:
+
+```
+[INPUT]
+ Name temperature_sensor
+ Tag temp_sensor_A
+
+[INPUT]
+ Name humidity_sensor
+ Tag humid_sensor_B
+
+[OUTPUT]
+ Name stdout
+ Match_regex .*_sensor_[AB]
+```
+
+In this configuration, the **Match_regex** rule is set to `.*_sensor_[AB]`. This regular expression will match any Tag that ends with "_sensor_A" or "_sensor_B", regardless of what precedes it.
+This approach provides a more flexible and powerful way to handle different source tags with a single routing rule.
diff --git a/concepts/key-concepts.md b/concepts/key-concepts.md
index b9d254ff6..c3b70801a 100644
--- a/concepts/key-concepts.md
+++ b/concepts/key-concepts.md
@@ -30,12 +30,36 @@ Jan 18 12:52:16 flb gsd-media-keys[2640]: # watch_fast: "/org/gnome/terminal/leg
It contains four lines and all of them represents **four** independent Events.
-Internally, an Event always has two components \(in an array form\):
+Internally, an Event is composed of:
+
+* timestamp
+* key/value metadata (since v2.1.0)
+* payload
+
+### Event format
+
+The Fluent Bit wire protocol represents an Event as a 2-element array
+with a nested array as the first element:
+
+```javascript
+[[TIMESTAMP, METADATA], MESSAGE]
+```
+
+where
+
+* TIMESTAMP is a timestamp in seconds as an integer or floating point value (not a string);
+* METADATA is a possibly-empty object containing event metadata; and
+* MESSAGE is an object containing the event body.
+
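+For example, an Event carrying one metadata key and a simple body could look like this (the values here are illustrative only):
+
+```javascript
+[[1686601400.233, {"file": "app.log"}], {"message": "hello world", "level": "info"}]
+```
+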
+Fluent Bit versions prior to v2.1.0 instead used:
```javascript
[TIMESTAMP, MESSAGE]
```
+to represent events. This format is still supported for reading input event
+streams.
+
## Filtering
In some cases it is required to perform modifications on the Events content, the process to alter, enrich or drop Events is called Filtering.
diff --git a/development/developer-guide.md b/development/developer-guide.md
index 73db34c0c..cf001047f 100644
--- a/development/developer-guide.md
+++ b/development/developer-guide.md
@@ -312,6 +312,16 @@ Output plugins are defined in [flb\_output.h](https://github.com/fluent/fluent-b
The [stdout plugin](https://github.com/fluent/fluent-bit/tree/master/plugins/out_stdout) is very simple; review its code to understand how output plugins work.
+## Development Environment
+
+Fluent Bit provides a standalone environment for development.
+Developers who use different operating systems or distributions can develop on a simple, common stack.
+The development environment provides the required libraries and tools for you.
+
+Development environments are provided for:
+- [Devcontainer](https://github.com/fluent/fluent-bit/blob/master/DEVELOPER_GUIDE.md#devcontainer)
+- [Vagrant](https://github.com/fluent/fluent-bit/blob/master/DEVELOPER_GUIDE.md#vagrant).
+
## Testing
During development, you can build Fluent Bit as follows:
diff --git a/development/golang-output-plugins.md b/development/golang-output-plugins.md
index c3039518c..af7d1e5a9 100644
--- a/development/golang-output-plugins.md
+++ b/development/golang-output-plugins.md
@@ -137,6 +137,10 @@ The following is an example of a main configuration file.
Name gstdout
```
+#### Config key constraint
+
+Some config keys are reserved by Fluent Bit and must not be used by a custom plugin. They are: `alias`, `host`, `ipv6`, `listen`, `log_level`, `log_suppress_interval`, `match`, `match_regex`, `mem_buf_limit`, `port`, `retry_limit`, `routable`, `storage.pause_on_chunks_overlimit`, `storage.total_limit_size`, `storage.type`, `tag`, `threaded`, `tls`, `tls.ca_file`, `tls.ca_path`, `tls.crt_file`, `tls.debug`, `tls.key_file`, `tls.key_passwd`, `tls.verify`, `tls.vhost`, `workers`
+
### Run using a configuration file
We can load a main configuration file using `-c` option.
diff --git a/development/wasm-filter-plugins.md b/development/wasm-filter-plugins.md
index 934dda104..b47207a1d 100644
--- a/development/wasm-filter-plugins.md
+++ b/development/wasm-filter-plugins.md
@@ -15,10 +15,10 @@ There are no additional requirements to execute WASM plugins.
`flb-wamrc` is just `flb-` prefixed AOT (Ahead Of Time) compiler that is provided from [wasm-micro-runtime](https://github.com/bytecodealliance/wasm-micro-runtime).
-For `flb-wamrc` support, users have to install llvm infrastructure, e.g:
+For `flb-wamrc` support, users have to install the LLVM infrastructure and some additional libraries (`libmlir`, `libPolly`, `libedit`, and `libpfm`), e.g.:
```text
-# apt install -y llvm
+# apt install -y llvm libmlir-14-dev libclang-common-14-dev libedit-dev libpfm4-dev
```
### For Build WASM programs
@@ -111,7 +111,7 @@ Once built, a WASM program will be available. Then, that built program can be ex
[FILTER]
Name wasm
- Tag dummy.*
+ Match dummy.*
WASM_Path /path/to/built_filter.wasm
Function_Name super_awesome_filter
accessible_paths .,/path/to/fluent-bit
@@ -128,6 +128,44 @@ For example, one of the examples of [Rust WASM filter](https://github.com/fluent
[0] dummy.local: [1666270589.270348000, {"lang"=>"Rust", "message"=>"dummy", "original"=>"{"message":"dummy"}", "tag"=>"dummy.local", "time"=>"2022-10-20T12:56:29.270348000 +0000"}]
[0] dummy.local: [1666270590.271107000, {"lang"=>"Rust", "message"=>"dummy", "original"=>"{"message":"dummy"}", "tag"=>"dummy.local", "time"=>"2022-10-20T12:56:30.271107000 +0000"}]
```
+Another example of a Rust WASM filter is the [flb_filter_iis](https://github.com/kenriortega/flb_filter_iis) filter.
+This filter takes the [Internet Information Services (IIS)](https://learn.microsoft.com/en-us/iis/manage/provisioning-and-managing-iis/configure-logging-in-iis) [w3c logs](https://learn.microsoft.com/en-us/iis/manage/provisioning-and-managing-iis/configure-logging-in-iis#select-w3c-fields-to-log) (with some custom modifications) and transforms the raw string into a standard Fluent Bit JSON structured record.
+
+```text
+[INPUT]
+ Name dummy
+ Dummy {"log": "2023-08-11 19:56:44 W3SVC1 WIN-PC1 ::1 GET / - 80 ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/115.0.0.0+Safari/537.36+Edg/115.0.1901.200 - - localhost 304 142 756 1078 -"}
+ Tag iis.*
+
+[FILTER]
+ Name wasm
+ match iis.*
+ WASM_Path /plugins/flb_filter_iis_wasm.wasm
+ Function_Name flb_filter_log_iis_w3c_custom
+ accessible_paths .
+
+[OUTPUT]
+ name stdout
+ match iis.*
+```
+
+The incoming raw strings from an IIS log are composed of the following fields:
+
+`date time s-sitename s-computername s-ip cs-method cs-uri-stem cs-uri-query s-port c-ip cs(User-Agent) cs(Cookie) cs(Referer) cs-host sc-status sc-bytes cs-bytes time-taken c-authorization-header`
+
+The output after the filter logic will be:
+
+```text
+[0] iis.*: [[1692131925.559486675, {}], {"c_authorization_header"=>"-", "c_ip"=>"::1", "cs_bytes"=>756, "cs_cookie"=>"-", "cs_host"=>"localhost", "cs_method"=>"GET", "cs_referer"=>"-", "cs_uri_query"=>"-", "cs_uri_stem"=>"/", "cs_user_agent"=>"Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/115.0.0.0+Safari/537.36+Edg/115.0.1901.200", "date"=>"2023-08-11 19:56:44", "s_computername"=>"WIN-PC1", "s_ip"=>"::1", "s_port"=>"80", "s_sitename"=>"W3SVC1", "sc_bytes"=>142, "sc_status"=>"304", "source"=>"LogEntryIIS", "tag"=>"iis.*", "time"=>"2023-08-15T20:38:45.559486675 +0000", "time_taken"=>1078}]
+```
+
+This filter approach provides several powerful advantages inherent to programming languages.
+For instance, it:
+- Can be extended by adding type conversion to fields such as `sc_bytes, cs_bytes, time_taken`. This is particularly useful when we need to validate our data results.
+- Allows for the use of conditions to apply more descriptive filters, for example, "get only all logs that contain status codes above 4xx or 5xx".
+- Can be used to define an `allow/deny` list using a data structure array or a file to store predefined IP addresses.
+- Makes it possible to call an external resource such as an API or database to enhance our data.
+- Allows all methods to be thoroughly tested and shared as a binary bundle or library.
+
+These examples can be applied in our demo and can serve as an ideal starting point to create more complex logic, depending on our requirements.
### Optimize execution of WASM programs
diff --git a/installation/docker.md b/installation/docker.md
index b9dc56a4d..7565c055d 100644
--- a/installation/docker.md
+++ b/installation/docker.md
@@ -15,8 +15,36 @@ docker run -ti cr.fluentbit.io/fluent/fluent-bit
The following table describes the Linux container tags that are available on Docker Hub [fluent/fluent-bit](https://hub.docker.com/r/fluent/fluent-bit/) repository:
-| Tag(s) | Manifest Architectures | Description |
-| ----------- | ------------------------- | ------------------------------------------------------------ |
+| Tag(s) | Manifest Architectures | Description |
+| ------------ | ------------------------- | -------------------------------------------------------------- |
+| 2.2.0-debug | x86_64, arm64v8, arm32v7 | Debug images |
+| 2.2.0 | x86_64, arm64v8, arm32v7 | Release [v2.2.0](https://fluentbit.io/announcements/v2.2.0/) |
+| 2.1.10-debug | x86_64, arm64v8, arm32v7 | Debug images |
+| 2.1.10 | x86_64, arm64v8, arm32v7 | Release [v2.1.10](https://fluentbit.io/announcements/v2.1.10/) |
+| 2.1.9-debug | x86_64, arm64v8, arm32v7 | Debug images |
+| 2.1.9 | x86_64, arm64v8, arm32v7 | Release [v2.1.9](https://fluentbit.io/announcements/v2.1.9/) |
+| 2.1.8-debug | x86_64, arm64v8, arm32v7 | Debug images |
+| 2.1.8 | x86_64, arm64v8, arm32v7 | Release [v2.1.8](https://fluentbit.io/announcements/v2.1.8/) |
+| 2.1.7-debug | x86_64, arm64v8, arm32v7 | Debug images |
+| 2.1.7 | x86_64, arm64v8, arm32v7 | Release [v2.1.7](https://fluentbit.io/announcements/v2.1.7/) |
+| 2.1.6-debug | x86_64, arm64v8, arm32v7 | Debug images |
+| 2.1.6 | x86_64, arm64v8, arm32v7 | Release [v2.1.6](https://fluentbit.io/announcements/v2.1.6/) |
+| 2.1.5 | x86_64, arm64v8, arm32v7 | Release [v2.1.5](https://fluentbit.io/announcements/v2.1.5/) |
+| 2.1.5-debug | x86_64, arm64v8, arm32v7 | Debug images |
+| 2.1.4 | x86_64, arm64v8, arm32v7 | Release [v2.1.4](https://fluentbit.io/announcements/v2.1.4/) |
+| 2.1.4-debug | x86_64, arm64v8, arm32v7 | Debug images |
+| 2.1.3 | x86_64, arm64v8, arm32v7 | Release [v2.1.3](https://fluentbit.io/announcements/v2.1.3/) |
+| 2.1.3-debug | x86_64, arm64v8, arm32v7 | Debug images |
+| 2.1.2 | x86_64, arm64v8, arm32v7 | Release [v2.1.2](https://fluentbit.io/announcements/v2.1.2/) |
+| 2.1.2-debug | x86_64, arm64v8, arm32v7 | Debug images |
+| 2.1.1 | x86\_64, arm64v8, arm32v7 | Release [v2.1.1](https://fluentbit.io/announcements/v2.1.1/) |
+| 2.1.1-debug | x86\_64, arm64v8, arm32v7 | v2.1.x releases (production + debug) |
+| 2.1.0 | x86\_64, arm64v8, arm32v7 | Release [v2.1.0](https://fluentbit.io/announcements/v2.1.0/) |
+| 2.1.0-debug | x86\_64, arm64v8, arm32v7 | v2.1.x releases (production + debug) |
+| 2.0.11 | x86\_64, arm64v8, arm32v7 | Release [v2.0.11](https://fluentbit.io/announcements/v2.0.11/) |
+| 2.0.11-debug | x86\_64, arm64v8, arm32v7 | v2.0.x releases (production + debug) |
+| 2.0.10 | x86\_64, arm64v8, arm32v7 | Release [v2.0.10](https://fluentbit.io/announcements/v2.0.10/) |
+| 2.0.10-debug | x86\_64, arm64v8, arm32v7 | v2.0.x releases (production + debug) |
| 2.0.9 | x86\_64, arm64v8, arm32v7 | Release [v2.0.9](https://fluentbit.io/announcements/v2.0.9/) |
| 2.0.9-debug | x86\_64, arm64v8, arm32v7 | v2.0.x releases (production + debug) |
| 2.0.8 | x86\_64, arm64v8, arm32v7 | Release [v2.0.8](https://fluentbit.io/announcements/v2.0.8/) |
diff --git a/installation/getting-started-with-fluent-bit.md b/installation/getting-started-with-fluent-bit.md
index 87a840450..6bdc4e2c9 100644
--- a/installation/getting-started-with-fluent-bit.md
+++ b/installation/getting-started-with-fluent-bit.md
@@ -57,4 +57,4 @@ Fluent Bit Sandbox Environment
Fluent Bit packages are also provided by [enterprise providers](https://fluentbit.io/enterprise) for older end of life versions, Unix systems, and additional support and features including aspects like CVE backporting.
A list provided by fluentbit.io/enterprise is provided below
-* [Calyptia Fluent Bit LTS](https://www.calyptia.com/download)
+* [Calyptia Fluent Bit LTS](https://calyptia.com/products/lts-fluentbit/)
diff --git a/installation/kubernetes.md b/installation/kubernetes.md
index f2b547893..da44eae16 100644
--- a/installation/kubernetes.md
+++ b/installation/kubernetes.md
@@ -151,7 +151,7 @@ spec:
### Configure Fluent Bit
-Assuming the basic volume configuration described above, you can apply the following config to start logging. You can visualize this configuration [here](https://link.calyptia.com/gzc)
+Assuming the basic volume configuration described above, you can apply the following config to start logging. You can visualize this configuration [here (Sign-up required)](https://calyptia.com/free-trial)
```yaml
fluent-bit.conf: |
diff --git a/installation/linux/amazon-linux.md b/installation/linux/amazon-linux.md
index 023664fdf..186763bd8 100644
--- a/installation/linux/amazon-linux.md
+++ b/installation/linux/amazon-linux.md
@@ -2,7 +2,7 @@
## Install on Amazon Linux
-Fluent Bit is distributed as **fluent-bit** package and is available for the latest Amazon Linux 2 and Amazon Linux 2022.
+Fluent Bit is distributed as the **fluent-bit** package and is available for the latest Amazon Linux 2 and Amazon Linux 2023.
The following architectures are supported
* x86\_64
@@ -22,12 +22,7 @@ The recommended secure deployment approach is to follow the instructions below.
### Amazon Linux 2022
-For Amazon Linux 2022, until it is GA, we need to force it to use the 2022 `releasever` in Yum but only for the Fluent Bit repository.
-
-```bash
-export FLUENT_BIT_INSTALL_COMMAND_PREFIX="sed -i 's|\$releasever/|2022/|g' /etc/yum.repos.d/fluent-bit.repo"
-curl https://raw.githubusercontent.com/fluent/fluent-bit/master/install.sh | sh
-```
+Amazon Linux 2022 was previously supported, but support was removed once it became generally available as Amazon Linux 2023.
## Configure Yum
@@ -38,18 +33,18 @@ We provide **fluent-bit** through a Yum repository. In order to add the reposito
```config
[fluent-bit]
name = Fluent Bit
-baseurl = https://packages.fluentbit.io/amazonlinux/2/$basearch/
+baseurl = https://packages.fluentbit.io/amazonlinux/2/
gpgcheck=1
gpgkey=https://packages.fluentbit.io/fluentbit.key
enabled=1
```
-### Amazon Linux 2022
+### Amazon Linux 2023
```config
[fluent-bit]
name = Fluent Bit
-baseurl = https://packages.fluentbit.io/amazonlinux/2022/$basearch/
+baseurl = https://packages.fluentbit.io/amazonlinux/2023/
gpgcheck=1
gpgkey=https://packages.fluentbit.io/fluentbit.key
enabled=1
@@ -83,26 +78,25 @@ Refer to the [supported platform documentation](../supported-platforms.md) to se
Once your repository is configured, run the following command to install it:
```bash
-yum install fluent-bit
+sudo yum install fluent-bit
```
Now the following step is to instruct _systemd_ to enable the service:
```bash
-sudo service fluent-bit start
+sudo systemctl start fluent-bit
```
If you do a status check, you should see a similar output like this:
```bash
-$ service fluent-bit status
-Redirecting to /bin/systemctl status fluent-bit.service
+$ systemctl status fluent-bit
● fluent-bit.service - Fluent Bit
Loaded: loaded (/usr/lib/systemd/system/fluent-bit.service; disabled; vendor preset: disabled)
Active: active (running) since Thu 2016-07-07 02:08:01 BST; 9s ago
Main PID: 3820 (fluent-bit)
CGroup: /system.slice/fluent-bit.service
- └─3820 /opt/fluent-bit/bin/fluent-bit -c etc/fluent-bit/fluent-bit.conf
+ └─3820 /opt/fluent-bit/bin/fluent-bit -c /etc/fluent-bit/fluent-bit.conf
...
```
diff --git a/installation/linux/redhat-centos.md b/installation/linux/redhat-centos.md
index 68468e659..277a26f8a 100644
--- a/installation/linux/redhat-centos.md
+++ b/installation/linux/redhat-centos.md
@@ -43,7 +43,7 @@ We provide **fluent-bit** through a Yum repository. In order to add the reposito
```shell
[fluent-bit]
name = Fluent Bit
-baseurl = https://packages.fluentbit.io/centos/$releasever/$basearch/
+baseurl = https://packages.fluentbit.io/centos/$releasever/
gpgcheck=1
gpgkey=https://packages.fluentbit.io/fluentbit.key
repo_gpgcheck=1
@@ -78,20 +78,19 @@ Refer to the [supported platform documentation](../supported-platforms.md) to se
Once your repository is configured, run the following command to install it:
```bash
-yum install fluent-bit
+sudo yum install fluent-bit
```
Now the following step is to instruct _Systemd_ to enable the service:
```bash
-sudo service fluent-bit start
+sudo systemctl start fluent-bit
```
If you do a status check, you should see a similar output like this:
```bash
-$ service fluent-bit status
-Redirecting to /bin/systemctl status fluent-bit.service
+$ systemctl status fluent-bit
● fluent-bit.service - Fluent Bit
Loaded: loaded (/usr/lib/systemd/system/fluent-bit.service; disabled; vendor preset: disabled)
Active: active (running) since Thu 2016-07-07 02:08:01 BST; 9s ago
diff --git a/installation/linux/ubuntu.md b/installation/linux/ubuntu.md
index c945bd1df..5e8f20755 100644
--- a/installation/linux/ubuntu.md
+++ b/installation/linux/ubuntu.md
@@ -67,6 +67,7 @@ We recommend upgrading your system (`sudo apt-get upgrade`). This could avoid po
{% hint style="info" %}
If you have the following error "Certificate verification failed", you might want to check if the package `ca-certificates` is properly installed (`sudo apt-get install ca-certificates`).
{% endhint %}
+
## Install Fluent Bit
Using the following _apt-get_ command you are able now to install the latest _fluent-bit_:
@@ -84,7 +85,7 @@ sudo systemctl start fluent-bit
If you do a status check, you should see a similar output like this:
```bash
-sudo service status fluent-bit
+systemctl status fluent-bit
● fluent-bit.service - Fluent Bit
Loaded: loaded (/lib/systemd/system/fluent-bit.service; disabled; vendor preset: enabled)
Active: active (running) since mié 2016-07-06 16:58:25 CST; 2h 45min ago
diff --git a/installation/macos.md b/installation/macos.md
index 6b69c13b1..b872b4509 100644
--- a/installation/macos.md
+++ b/installation/macos.md
@@ -1,7 +1,11 @@
# macOS
Fluent Bit is compatible with latest Apple macOS system on x86_64 and Apple Silicon M1 architectures.
-At the moment there is no official supported package but you can build it from sources by following the instructions below.
+At the moment there is an officially supported package only for x86_64, but you can also build it from source by following the instructions below.
+
+## Installation Packages
+
+The packages can be found here:
## Requirements
diff --git a/installation/requirements.md b/installation/requirements.md
index 43fddcd3f..efaf1ada0 100644
--- a/installation/requirements.md
+++ b/installation/requirements.md
@@ -1,6 +1,6 @@
# Requirements
-[Fluent Bit](http://fluentbit.io) uses very low CPU and Memory consumption, it's compatible with most of x86, x86\_64, arm32v7 and arm64v8 based platforms. In order to build it you need the following components in your system for the build process:
+[Fluent Bit](http://fluentbit.io) has very low CPU and memory consumption, and it's compatible with most x86, x86\_64, arm32v7, and arm64v8 based platforms. In order to build it you need the following components in your system for the build process:
* Compiler: GCC or clang
* CMake
@@ -9,3 +9,4 @@
In the core there are not other dependencies, For certain features that depends on third party components like output plugins with special backend libraries \(e.g: kafka\), those are included in the main source code repository.
+Fluent Bit is supported on Linux on IBM Z (s390x), but the WASM and Lua filter plugins are not.
diff --git a/installation/sources/build-and-install.md b/installation/sources/build-and-install.md
index b7c7c4aad..4453ce96e 100644
--- a/installation/sources/build-and-install.md
+++ b/installation/sources/build-and-install.md
@@ -4,9 +4,9 @@
## Requirements
-- CMake >= 3.0
+- CMake >= 3.12
- Flex
-- Bison
+- Bison >= 3
- YAML headers
- OpenSSL headers
@@ -132,6 +132,7 @@ The _input plugins_ provides certain features to gather information from a speci
| [FLB\_IN\_EXEC](../../pipeline/inputs/exec.md) | Enable Exec input plugin | On |
| [FLB\_IN\_EXEC\_WASI](../../pipeline/inputs/exec-wasi.md) | Enable Exec WASI input plugin | On |
| [FLB_IN_FLUENTBIT_METRICS](../../pipeline/inputs/fluentbit-metrics.md) | Enable Fluent Bit metrics input plugin | On |
+| [FLB\_IN\_ELASTICSEARCH](../../pipeline/inputs/elasticsearch.md) | Enable Elasticsearch/OpenSearch Bulk input plugin | On |
| [FLB\_IN\_FORWARD](../../pipeline/inputs/forward.md) | Enable Forward input plugin | On |
| [FLB\_IN\_HEAD](../../pipeline/inputs/head.md) | Enable Head input plugin | On |
| [FLB\_IN\_HEALTH](../../pipeline/inputs/health.md) | Enable Health input plugin | On |
@@ -148,6 +149,7 @@ The _input plugins_ provides certain features to gather information from a speci
| [FLB\_IN\_TAIL](../../pipeline/inputs/tail.md) | Enable Tail \(follow files\) input plugin | On |
| [FLB\_IN\_TCP](../../pipeline/inputs/tcp.md) | Enable TCP input plugin | On |
| [FLB\_IN\_THERMAL](../../pipeline/inputs/thermal.md) | Enable system temperature\(s\) input plugin | On |
+| [FLB\_IN\_UDP](../../pipeline/inputs/udp.md) | Enable UDP input plugin | On |
| [FLB\_IN\_WINLOG](../../pipeline/inputs/windows-event-log.md) | Enable Windows Event Log input plugin \(Windows Only\) | On |
| [FLB\_IN\_WINEVTLOG](../../pipeline/inputs/windows-event-log-winevtlog.md) | Enable Windows Event Log input plugin using winevt.h API \(Windows Only\) | On |
@@ -169,7 +171,9 @@ The _filter plugins_ allows to modify, enrich or drop records. The following tab
| [FLB\_FILTER\_RECORD\_MODIFIER](../../pipeline/filters/record-modifier.md) | Enable Record Modifier filter | On |
| [FLB\_FILTER\_REWRITE\_TAG](../../pipeline/filters/rewrite-tag.md) | Enable Rewrite Tag filter | On |
| [FLB\_FILTER\_STDOUT](../../pipeline/filters/standard-output.md) | Enable Stdout filter | On |
+| [FLB\_FILTER\_SYSINFO](../../pipeline/filters/sysinfo.md) | Enable Sysinfo filter | On |
| [FLB\_FILTER\_THROTTLE](../../pipeline/filters/throttle.md) | Enable Throttle filter | On |
+| [FLB\_FILTER\_TYPE\_CONVERTER](../../pipeline/filters/type-converter.md) | Enable Type Converter filter | On |
| [FLB\_FILTER\_WASM](../../pipeline/filters/wasm.md) | Enable WASM filter | On |
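+
+As a sketch of how these build flags are used (assuming an out-of-tree `build` directory, as in the build instructions), a plugin can be disabled at configure time like so:
+
+```bash
+cd build
+cmake -DFLB_FILTER_WASM=Off ..
+make
+```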
### Output Plugins
diff --git a/installation/supported-platforms.md b/installation/supported-platforms.md
index 768534dad..778edbb6a 100644
--- a/installation/supported-platforms.md
+++ b/installation/supported-platforms.md
@@ -4,7 +4,7 @@ The following operating systems and architectures are supported in Fluent Bit.
| Operating System | Distribution | Architectures |
| :--- | :--- | :--- |
-| Linux | [Amazon Linux 2022](linux/amazon-linux.md) | x86\_64, Arm64v8 |
+| Linux | [Amazon Linux 2023](linux/amazon-linux.md) | x86\_64, Arm64v8 |
| | [Amazon Linux 2](linux/amazon-linux.md) | x86\_64, Arm64v8 |
| | [Centos 9 Stream](linux/redhat-centos.md) | x86\_64, Arm64v8 |
| | [Centos 8](linux/redhat-centos.md) | x86\_64, Arm64v8 |
@@ -29,3 +29,5 @@ From an architecture support perspective, Fluent Bit is fully functional on x86\
Fluent Bit can work also on OSX and \*BSD systems, but not all plugins will be available on all platforms.
Official support will be expanding based on community demand.
Fluent Bit may run on older operating systems though will need to be built from source, or use custom packages from [enterprise providers](https://fluentbit.io/enterprise).
+
+Fluent Bit is supported on Linux on IBM Z (s390x) with some restrictions, but only container images are officially provided for this target.
diff --git a/installation/windows.md b/installation/windows.md
index 9e48f6f7b..bc5d21927 100644
--- a/installation/windows.md
+++ b/installation/windows.md
@@ -1,7 +1,8 @@
# Windows
-Fluent Bit is distributed as **fluent-bit** package for Windows and as a [Windows container on Docker Hub](./docker.md).
-Fluent Bit has two flavours of Windows installers: a ZIP archive (for quick testing) and an EXE installer (for system installation).
+Fluent Bit is distributed as **fluent-bit** package for Windows and as a [Windows container on Docker Hub](docker.md). Fluent Bit has two flavours of Windows installers: a ZIP archive (for quick testing) and an EXE installer (for system installation).
+
+Not all plugins are supported on Windows: the [CMake configuration](https://github.com/fluent/fluent-bit/blob/master/cmake/windows-setup.cmake) shows the default set of supported plugins.
## Configuration
@@ -74,24 +75,34 @@ Make sure to provide a valid Windows configuration with the installation, a samp
## Migration to Fluent Bit
-From version 1.9, `td-agent-bit` is a deprecated package and was removed after 1.9.9.
-The correct package name to use now is `fluent-bit`.
+From version 1.9, `td-agent-bit` is a deprecated package and was removed after 1.9.9. The correct package name to use now is `fluent-bit`.
## Installation Packages
-The latest stable version is 2.0.9, each version is available on the Github release as well as at `https://releases.fluentbit.io//fluent-bit--win[32|64].[exe|zip]`:
+The latest stable version is 2.2.0.
+Each version is available via the following download URLs.
| INSTALLERS | SHA256 CHECKSUMS |
| ------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------- |
-| [fluent-bit-2.0.9-win32.exe](https://releases.fluentbit.io/2.0/fluent-bit-2.0.9-win32.exe) | [a6c1a74acc00ce6211694f4f0a037b1b6ce3ab8dd4e6d857ea7d0d4cbadec682](https://releases.fluentbit.io/2.0/fluent-bit-2.0.9-win32.exe.sha256) |
-| [fluent-bit-2.0.9-win32.zip](https://releases.fluentbit.io/2.0/fluent-bit-2.0.9-win32.zip) | [8c0935a89337d073d4eae3440c65f55781bc097cdefa8819d2475db6c1befc9c](https://releases.fluentbit.io/2.0/fluent-bit-2.0.9-win32.zip.sha256) |
-| [fluent-bit-2.0.9-win64.exe](https://releases.fluentbit.io/2.0/fluent-bit-2.0.9-win64.exe) | [7970350f5bd0212be7d87ad51046a6d1600f3516c6209cd69af6d95759d280df](https://releases.fluentbit.io/2.0/fluent-bit-2.0.9-win64.exe.sha256) |
-| [fluent-bit-2.0.9-win64.zip](https://releases.fluentbit.io/2.0/fluent-bit-2.0.9-win64.zip) | [94750cf1faf6f5594047f70c585577ee38d8cdd4d6e098eefb3e665c98c3709f](https://releases.fluentbit.io/2.0/fluent-bit-2.0.9-win64.zip.sha256) |
+| [fluent-bit-2.2.0-win32.exe](https://packages.fluentbit.io/windows/fluent-bit-2.2.0-win32.exe) | [9ba2a84ce66bd899131896b04d37c4b5cb6fe5995f1a756f3a349457e4aff438](https://packages.fluentbit.io/windows/fluent-bit-2.2.0-win32.exe.sha256) |
+| [fluent-bit-2.2.0-win32.zip](https://packages.fluentbit.io/windows/fluent-bit-2.2.0-win32.zip) | [9d176e3e490f383f5bc5a1c84cd200ee57978f24659bb356e5a6958653c1ec9d](https://packages.fluentbit.io/windows/fluent-bit-2.2.0-win32.zip.sha256) |
+| [fluent-bit-2.2.0-win64.exe](https://packages.fluentbit.io/windows/fluent-bit-2.2.0-win64.exe) | [f2e46027320e656daff4758690e6d0a2a55b66b48728d1c16cf85988b5260364](https://packages.fluentbit.io/windows/fluent-bit-2.2.0-win64.exe.sha256) |
+| [fluent-bit-2.2.0-win64.zip](https://packages.fluentbit.io/windows/fluent-bit-2.2.0-win64.zip) | [45baf22505c29d79b14b14471ddd1c2272016781622441037114cb08ee2d0921](https://packages.fluentbit.io/windows/fluent-bit-2.2.0-win64.zip.sha256) |
+| [fluent-bit-2.2.0-winarm64.exe](https://packages.fluentbit.io/windows/fluent-bit-2.2.0-winarm64.exe) | [6a80f2677224a8cac6400c1e75952dc22b0bccd6e84ac611974daec127ae1b08](https://packages.fluentbit.io/windows/fluent-bit-2.2.0-winarm64.exe.sha256) |
+| [fluent-bit-2.2.0-winarm64.zip](https://packages.fluentbit.io/windows/fluent-bit-2.2.0-winarm64.zip) | [43470ae91782c706062b62758f1da735d17b227ac8f6cfda5503e3a1a73b14ac](https://packages.fluentbit.io/windows/fluent-bit-2.2.0-winarm64.zip.sha256) |
+
+**Note: these are now built via GitHub Actions; the legacy AppVeyor builds (AMD 32/64 only) are still available at releases.fluentbit.io but are deprecated.**
+
+MSI installers are also available:
+
+- [fluent-bit-2.2.0-win32.msi](https://packages.fluentbit.io/windows/fluent-bit-2.2.0-win32.msi)
+- [fluent-bit-2.2.0-win64.msi](https://packages.fluentbit.io/windows/fluent-bit-2.2.0-win64.msi)
+- [fluent-bit-2.2.0-winarm64.msi](https://packages.fluentbit.io/windows/fluent-bit-2.2.0-winarm64.msi)
To check the integrity, use `Get-FileHash` cmdlet on PowerShell.
```powershell
-PS> Get-FileHash fluent-bit-2.0.9-win32.exe
+PS> Get-FileHash fluent-bit-2.2.0-win32.exe
```
## Installing from ZIP archive
@@ -101,12 +112,12 @@ Download a ZIP archive from above. There are installers for 32-bit and 64-bit en
Then you need to expand the ZIP archive. You can do this by clicking "Extract All" on Explorer, or if you're using PowerShell, you can use `Expand-Archive` cmdlet.
```powershell
-PS> Expand-Archive fluent-bit-2.0.9-win64.zip
+PS> Expand-Archive fluent-bit-2.2.0-win64.zip
```
The ZIP package contains the following set of files.
-```text
+```
fluent-bit
├── bin
│ ├── fluent-bit.dll
@@ -154,17 +165,13 @@ To halt the process, press CTRL-C in the terminal.
## Installing from EXE installer
-Download an EXE installer from the [download page](https://fluentbit.io/download/).
-It has both 32-bit and 64-bit builds.
-Choose one which is suitable for you.
+Download an EXE installer from the [download page](https://fluentbit.io/download/). It has both 32-bit and 64-bit builds. Choose one which is suitable for you.
-Double-click the EXE installer you've downloaded.
-The installation wizard will automatically start.
+Double-click the EXE installer you've downloaded. The installation wizard will automatically start.
-![Installation wizard screenshot](<../.gitbook/assets/windows_installer (1) (1).png>)
+![Installation wizard screenshot](<../.gitbook/assets/windows\_installer (1) (1).png>)
-Click Next and proceed.
-By default, Fluent Bit is installed into `C:\Program Files\fluent-bit\`, so you should be able to launch fluent-bit as follows after installation.
+Click Next and proceed. By default, Fluent Bit is installed into `C:\Program Files\fluent-bit\`, so you should be able to launch fluent-bit as follows after installation.
```powershell
PS> C:\Program Files\fluent-bit\bin\fluent-bit.exe -i dummy -o stdout
@@ -172,7 +179,7 @@ PS> C:\Program Files\fluent-bit\bin\fluent-bit.exe -i dummy -o stdout
### Installer options
-The Windows installer is built by [`CPack` using NSIS() and so supports the [default options](https://nsis.sourceforge.io/Docs/Chapter3.html#3.2.1) that all NSIS installers do for silent installation and the directory to install to.
+The Windows installer is built by [`CPack` using NSIS](https://cmake.org/cmake/help/latest/cpack_gen/nsis.html) and so supports the [default options](https://nsis.sourceforge.io/Docs/Chapter3.html#3.2.1) that all NSIS installers do for silent installation and the directory to install to.
To silently install to `C:\fluent-bit` directory here is an example:
@@ -180,8 +187,7 @@ To silently install to `C:\fluent-bit` directory here is an example:
PS> .\fluent-bit-2.2.0-win64.exe /S /D=C:\fluent-bit
```
-The uninstaller automatically provided also supports a silent un-install using the same `/S` flag.
-This may be useful for provisioning with automation like Ansible, Puppet, etc.
+The automatically provided uninstaller also supports a silent uninstall using the same `/S` flag. This may be useful for provisioning with automation like Ansible, Puppet, etc.
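+
+For example, assuming the default install location (the uninstaller name and path may vary by version; this is a sketch):
+
+```powershell
+PS> & 'C:\Program Files\fluent-bit\Uninstall.exe' /S
+```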
## Windows Service Support
@@ -189,7 +195,7 @@ Windows services are equivalent to "daemons" in UNIX (i.e. long-running backgrou
Suppose you have the following installation layout:
-```text
+```
C:\fluent-bit\
├── conf
│ ├── fluent-bit.conf
@@ -226,20 +232,19 @@ To halt the Fluent Bit service, just execute the "stop" command.
To start Fluent Bit automatically on boot, execute the following:
-```text
+```
% sc.exe config fluent-bit start= auto
```
-### [FAQ] Fluent Bit fails to start up when installed under `C:\Program Files`
+### \[FAQ] Fluent Bit fails to start up when installed under `C:\Program Files`
-Quotations are required if file paths contain spaces.
-Here is an example:
+Quotations are required if file paths contain spaces. Here is an example:
-```text
+```
% sc.exe create fluent-bit binpath= "\"C:\Program Files\fluent-bit\bin\fluent-bit.exe\" -c \"C:\Program Files\fluent-bit\conf\fluent-bit.conf\""
```
-### [FAQ] How can I manage Fluent Bit service via PowerShell?
+### \[FAQ] How can I manage Fluent Bit service via PowerShell?
Instead of `sc.exe`, PowerShell can be used to manage Windows services.
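+
+For example, once the service is registered, the standard service cmdlets work (a sketch; the service name `fluent-bit` follows the examples above):
+
+```powershell
+PS> Start-Service fluent-bit
+PS> Get-Service fluent-bit   # check the current status
+PS> Stop-Service fluent-bit
+```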
@@ -308,6 +313,8 @@ PS> cp -Path C:\WinFlexBison\win_flex.exe C:\WinFlexBison\flex.exe
Add the path `C:\WinFlexBison` to your systems environment variable "Path". [Here's how to do that](https://www.architectryan.com/2018/03/17/add-to-the-path-on-windows-10/).
+It is important to have the OpenSSL binaries installed, at least the library files and headers.
+
Also you need to install [git](https://git-scm.com/download/win) to pull the source code from the repository.
```powershell
@@ -317,7 +324,10 @@ PS> start git.exe
### Compilation
-Open the start menu on Windows and type "Developer Command Prompt".
+Open the start menu on Windows and type "Command Prompt for VS". From the result list, select the one that corresponds to your target system (x86 or x64).
+
+> **Note:** Check that the installed OpenSSL library files match the selected target. You can inspect the library files by using the **dumpbin** command with the **/headers** option.
+
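+As a hedged example (the library path is an assumption; adjust it to your OpenSSL installation), you can inspect the target architecture of a library like this:
+
+```text
+dumpbin /headers "C:\Program Files\OpenSSL-Win64\lib\libssl.lib" | findstr machine
+```
+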
Clone the source code of Fluent Bit.
diff --git a/local-testing/logging-pipeline.md b/local-testing/logging-pipeline.md
index 849cc45da..5f3202909 100644
--- a/local-testing/logging-pipeline.md
+++ b/local-testing/logging-pipeline.md
@@ -4,7 +4,7 @@ You may wish to test a logging pipeline locally to observe how it deals with log
## Create a Configuration File
-Refer to the [Configuration File section](https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/configuration-file) to create a configuration to test.
+Refer to the [Configuration File section](https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/configuration-file) to create a configuration to test.
`fluent-bit.conf`:
diff --git a/pipeline/filters/aws-metadata.md b/pipeline/filters/aws-metadata.md
index a0dcf0378..b7b24e5f7 100644
--- a/pipeline/filters/aws-metadata.md
+++ b/pipeline/filters/aws-metadata.md
@@ -17,6 +17,9 @@ The plugin supports the following configuration parameters:
| account\_id | The account ID for current EC2 instance. | false |
| hostname | The hostname for current EC2 instance. | false |
| vpc\_id | The VPC ID for current EC2 instance. | false |
+| tags\_enabled | Specifies whether to attach EC2 instance tags. The EC2 instance must have the [instance-metadata-tags](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/modify-instance-metadata-options.html) option enabled (which is disabled by default). | false |
+| tags\_include | Defines a list of specific EC2 tag keys to inject into the logs. Tag keys must be separated by a "," character. Tags not present in this list will be ignored. Example: `Name,tag1,tag2`. | |
+| tags\_exclude | Defines a list of specific EC2 tag keys not to inject into the logs. Tag keys must be separated by a "," character. Tags not present in this list will be injected into the logs. If both `tags_include` and `tags_exclude` are specified, the configuration is invalid and the plugin fails. Example: `Name,tag1,tag2` | |
Note: _If you run Fluent Bit in a container, you may have to use instance metadata v1._ The plugin behaves the same regardless of which version is used.
@@ -49,9 +52,58 @@ $ bin/fluent-bit -c /PATH_TO_CONF_FILE/fluent-bit.conf
account_id true
hostname true
vpc_id true
+ tags_enabled true
[OUTPUT]
Name stdout
Match *
```
+## EC2 Tags
+
+EC2 Tags are a useful feature that enables you to label and organize your EC2 instances with custom-defined key-value pairs. These tags are commonly used for resource management, cost allocation, and automation. Consequently, including them in the logs generated by Fluent Bit is often essential.
+
+To achieve this, AWS Filter can be configured with `tags_enabled true` to enable the _tagging_ of logs with the relevant EC2 instance tags. This setup ensures that logs are appropriately tagged, making it easier to manage and analyze them based on specific criteria.
+
+### Requirements
+
+To use the `tags_enabled true` functionality in Fluent Bit, the [instance-metadata-tags](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/modify-instance-metadata-options.html) option must be enabled on the EC2 instance where Fluent Bit is running. Without this option enabled, Fluent Bit will not be able to retrieve the tags associated with the EC2 instance. However, this does not mean that Fluent Bit will fail or stop working altogether. Instead, if [instance-metadata-tags](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/modify-instance-metadata-options.html) option is not enabled, Fluent Bit will continue to operate normally and capture other values, such as the EC2 instance ID or availability zone, based on its configuration.
+
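+As a hedged illustration (the instance ID is a placeholder), the option can be enabled with the AWS CLI:
+
+```
+$ aws ec2 modify-instance-metadata-options \
+    --instance-id i-1234567890abcdef0 \
+    --instance-metadata-tags enabled
+```
+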
+### Example
+
+#### tags_include
+
+Assume that our EC2 instance has many tags, some of which have lengthy values that are irrelevant to the logs we want to collect. Only two tags, `department` and `project`, seem to be valuable for our purpose. Here is a configuration which reflects this requirement:
+
+```
+[FILTER]
+ Name aws
+ Match *
+ tags_enabled true
+ tags_include department,project
+```
+
+If we run Fluent Bit, what will the logs look like? Here is an example of what the logs might contain:
+```
+{"log"=>"fluentbit is awesome", "az"=>"us-east-1a", "ec2_instance_id"=>"i-0e66fc7f9809d7168", "department"=>"it", "project"=>"fluentbit"}
+```
+
+#### tags_exclude
+
+Suppose our EC2 instance has three tags: `Name:fluent-bit-docs-example`, `project:fluentbit`, and `department:it`. In this example, we want to exclude the `department` tag since we consider it redundant. This is because all of our projects belong to the `it` department, and we do not need to waste storage space on redundant labels.
+
+Here is an example configuration that achieves this:
+
+```
+[FILTER]
+ Name aws
+ Match *
+ tags_enabled true
+ tags_exclude department
+```
+
+The resulting logs might look like this:
+
+```
+{"log"=>"aws is awesome", "az"=>"us-east-1a", "ec2_instance_id"=>"i-0e66fc7f9809d7168", "Name"=>"fluent-bit-docs-example", "project"=>"fluentbit"}
+```
diff --git a/pipeline/filters/ecs-metadata.md b/pipeline/filters/ecs-metadata.md
index ce154755f..e2d6f567b 100644
--- a/pipeline/filters/ecs-metadata.md
+++ b/pipeline/filters/ecs-metadata.md
@@ -116,7 +116,8 @@ The output log would be similar to:
}
```
-Notice that the template variables in the value for the `resource` key are separated by dot characters. Please see the section below about limitations in which characters can be used to separate template variables.
+Notice that the template variables in the value for the `resource` key are separated by dot characters; only dots and commas (`.` and `,`) can come after a template variable. For more information, please check the [Record accessor limitations section](../../administration/configuring-fluent-bit/classic-mode/record-accessor.md#limitations-of-record_accessor-templating).
#### Example 3: Attach cluster metadata to non-container logs
@@ -148,23 +149,3 @@ This examples shows a use case for the `Cluster_Metadata_Only` option- attaching
Format json_lines
```
-### Limitations of record_accessor templating
-
-Notice in example 2, that the template values are separated by dot characters. This is important; the Fluent Bit record_accessor library has a limitation in the characters that can separate template variables- only dots and commas (`.` and `,`) can come after a template variable. This is because the templating library must parse the template and determine the end of a variable.
-
-The following would be invalid templates because the two template variables are not separated by commas or dots:
-
-- `$TaskID-$ECSContainerName`
-- `$TaskID/$ECSContainerName`
-- `$TaskID_$ECSContainerName`
-- `$TaskIDfooo$ECSContainerName`
-
-However, the following are valid:
-- `$TaskID.$ECSContainerName`
-- `$TaskID.ecs_resource.$ECSContainerName`
-- `$TaskID.fooo.$ECSContainerName`
-
-And the following are valid since they only contain one template variable with nothing after it:
-- `fooo$TaskID`
-- `fooo____$TaskID`
-- `fooo/bar$TaskID`
diff --git a/pipeline/filters/grep.md b/pipeline/filters/grep.md
index 0fd3c3993..f07b4bd18 100644
--- a/pipeline/filters/grep.md
+++ b/pipeline/filters/grep.md
@@ -14,6 +14,7 @@ The plugin supports the following configuration parameters:
| :--- | :--- | :--- |
| Regex | KEY REGEX | Keep records in which the content of KEY matches the regular expression. |
| Exclude | KEY REGEX | Exclude records in which the content of KEY matches the regular expression. |
+| Logical_Op | Operation | Specify which logical operator to use. `AND`, `OR` and `legacy` are allowed as an Operation. Default is `legacy` for backward compatibility. In `legacy` mode the behaviour is either AND or OR depending on whether the `grep` is including (uses AND) or excluding (uses OR). Only available from 2.1+. |
#### Record Accessor Enabled
@@ -114,3 +115,46 @@ Here is an example that checks for a specific valid value for the key as well:
```
The specified key `iot_timestamp` must match the expected expression - if it does not or is missing/empty then it will be excluded.
+
+### Multiple conditions
+
+If you want to set multiple `Regex` or `Exclude` rules, you can use the `Logical_Op` property to apply a logical conjunction or disjunction.
+
+Note: If `Logical_Op` is set, setting both `Regex` and `Exclude` results in an error.
+
+```python
+[INPUT]
+ Name dummy
+ Dummy {"endpoint":"localhost", "value":"something"}
+ Tag dummy
+
+[FILTER]
+ Name grep
+ Match *
+ Logical_Op or
+ Regex value something
+ Regex value error
+
+[OUTPUT]
+ Name stdout
+```
+
+The output will be:
+```
+Fluent Bit v2.0.9
+* Copyright (C) 2015-2022 The Fluent Bit Authors
+* Fluent Bit is a CNCF sub-project under the umbrella of Fluentd
+* https://fluentbit.io
+
+[2023/01/22 09:46:49] [ info] [fluent bit] version=2.0.9, commit=16eae10786, pid=33268
+[2023/01/22 09:46:49] [ info] [storage] ver=1.2.0, type=memory, sync=normal, checksum=off, max_chunks_up=128
+[2023/01/22 09:46:49] [ info] [cmetrics] version=0.5.8
+[2023/01/22 09:46:49] [ info] [ctraces ] version=0.2.7
+[2023/01/22 09:46:49] [ info] [input:dummy:dummy.0] initializing
+[2023/01/22 09:46:49] [ info] [input:dummy:dummy.0] storage_strategy='memory' (memory only)
+[2023/01/22 09:46:49] [ info] [filter:grep:grep.0] OR mode
+[2023/01/22 09:46:49] [ info] [sp] stream processor started
+[2023/01/22 09:46:49] [ info] [output:stdout:stdout.0] worker #0 started
+[0] dummy: [1674348410.558341857, {"endpoint"=>"localhost", "value"=>"something"}]
+[0] dummy: [1674348411.546425499, {"endpoint"=>"localhost", "value"=>"something"}]
+```
\ No newline at end of file
diff --git a/pipeline/filters/kubernetes.md b/pipeline/filters/kubernetes.md
index 8b6d08069..92a37583f 100644
--- a/pipeline/filters/kubernetes.md
+++ b/pipeline/filters/kubernetes.md
@@ -228,7 +228,7 @@ metadata:
name: fluentbitds
namespace: fluentbit-system
---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: fluentbit
@@ -244,7 +244,7 @@ rules:
- list
- watch
---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: fluentbit
diff --git a/pipeline/filters/log_to_metrics.md b/pipeline/filters/log_to_metrics.md
new file mode 100644
index 000000000..5dbc017d7
--- /dev/null
+++ b/pipeline/filters/log_to_metrics.md
@@ -0,0 +1,210 @@
+---
+description: Generate metrics from logs
+---
+
+# Log To Metrics
+
+The _Log To Metrics Filter_ plugin allows you to generate log-derived metrics. It currently supports modes to count records, provide a gauge for field values or create a histogram. You can also match or exclude specific records based on regular expression patterns for values or nested values. This filter plugin does not actually act as a record filter and does not change or drop records. All records will pass through this filter untouched, and generated metrics will be emitted into a separate metric pipeline.
+
+_Please note that this plugin is an experimental feature and is not recommended for production use. Configuration parameters and plugin functionality are subject to change without notice._
+
+
+## Configuration Parameters
+
+The plugin supports the following configuration parameters:
+
+| Key | Description | Mandatory | Value Format |
+| :--- | :--- | :--- | :--- |
+| tag | Defines the tag for the generated metrics record | Yes | |
+| metric_mode | Defines the mode for the metric. Valid values are [`counter`, `gauge` or `histogram`] | Yes | |
+| metric_name | Sets the name of the metric. | Yes | |
+| metric_description | Sets a help text for the metric. | Yes | |
+| bucket | Defines a bucket for `histogram` | Yes, for mode `histogram` | e.g. 0.75 |
+| add_label | Add a custom label NAME and set the value to the value of KEY | | NAME KEY |
+| label_field | Includes a record field as label dimension in the metric. | | Name of record key. Supports [Record Accessor](../../administration/configuring-fluent-bit/classic-mode/record-accessor.md) notation for nested fields. |
+| value_field | Specify the record field that holds a numerical value | Yes, for modes [`gauge` and `histogram`] | Name of record key. Supports [Record Accessor](../../administration/configuring-fluent-bit/classic-mode/record-accessor.md) notation for nested fields. |
+| kubernetes_mode | If enabled, it will automatically put pod_id, pod_name, namespace_name, docker_id and container_name into the metric as labels. This option is intended to be used in combination with the [kubernetes](./kubernetes.md) filter plugin, which fills those fields. | | |
+| Regex | Include records in which the content of KEY matches the regular expression. | | KEY REGEX |
+| Exclude | Exclude records in which the content of KEY matches the regular expression. | | KEY REGEX |
+
+## Getting Started
+
+The following example takes records from two dummy inputs and counts all messages passing through the `log_to_metrics` filter. It then generates metric records which are provided to the `prometheus_exporter`:
+
+### Configuration - Counter
+
+```python
+[SERVICE]
+ flush 1
+ log_level info
+
+[INPUT]
+ Name dummy
+ Dummy {"message":"dummy", "kubernetes":{"namespace_name": "default", "docker_id": "abc123", "pod_name": "pod1", "container_name": "mycontainer", "pod_id": "def456", "labels":{"app": "app1"}}, "duration": 20, "color": "red", "shape": "circle"}
+ Tag dummy.log
+
+[INPUT]
+ Name dummy
+ Dummy {"message":"hello", "kubernetes":{"namespace_name": "default", "docker_id": "abc123", "pod_name": "pod1", "container_name": "mycontainer", "pod_id": "def456", "labels":{"app": "app1"}}, "duration": 60, "color": "blue", "shape": "square"}
+ Tag dummy.log2
+
+[FILTER]
+ name log_to_metrics
+ match dummy.log*
+ tag test_metric
+ metric_mode counter
+ metric_name count_all_dummy_messages
+ metric_description This metric counts dummy messages
+
+[OUTPUT]
+ name prometheus_exporter
+ match *
+ host 0.0.0.0
+ port 2021
+```
+
+You can then use, for example, the `curl` command to retrieve the generated metric:
+```text
+> curl -s http://127.0.0.1:2021/metrics
+
+
+# HELP log_metric_counter_count_all_dummy_messages This metric counts dummy messages
+# TYPE log_metric_counter_count_all_dummy_messages counter
+log_metric_counter_count_all_dummy_messages 49
+```
+
+### Configuration - Gauge
+
+The `gauge` mode needs a `value_field` specified, which is the field the current metric values are generated from. In this example we also apply a regex filter and enable the `kubernetes_mode` option:
+```python
+[FILTER]
+ name log_to_metrics
+ match dummy.log*
+ tag test_metric
+ metric_mode gauge
+ metric_name current_duration
+ metric_description This metric shows the current duration
+ value_field duration
+ kubernetes_mode on
+ regex message .*el.*
+ add_label app $kubernetes['labels']['app']
+ label_field color
+ label_field shape
+```
+You can then use, for example, the `curl` command to retrieve the generated metric:
+```text
+> curl -s http://127.0.0.1:2021/metrics
+
+
+# HELP log_metric_gauge_current_duration This metric shows the current duration
+# TYPE log_metric_gauge_current_duration gauge
+log_metric_gauge_current_duration{namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="blue",shape="square"} 60
+```
+
+As you can see in the output, only one line is printed; the records from the first input plugin are ignored because they do not match the regex.
+
+The filter also allows you to use multiple rules, which are applied in order; you can have as many _Regex_ and _Exclude_ entries as required (see the [grep](./grep.md) filter plugin).
+
+If you execute the above `curl` command multiple times, you will see that in this example the metric value stays at `60`, as the messages generated by the dummy plugin do not change. In a real-world scenario the values would change and the metric would return the last processed value.
+
+
+#### Metric label_values
+As you can see, the label sets defined by `add_label` and `label_field` are added to the metric. The lines in the metric represent every combination of labels; only combinations that actually occur are displayed here. To see this, you can add a second `dummy` input to your configuration.
+
+The metric output would then look like:
+```text
+> curl -s http://127.0.0.1:2021/metrics
+
+# HELP log_metric_gauge_current_duration This metric shows the current duration
+# TYPE log_metric_gauge_current_duration gauge
+log_metric_gauge_current_duration{namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="blue",shape="square"} 60
+log_metric_gauge_current_duration{namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="red",shape="circle"} 20
+
+```
+
+You can also see that all the Kubernetes labels have been attached to the metric accordingly.
+
+### Configuration - Histogram
+
+Similar to the `gauge` mode, `histogram` needs a `value_field` specified, which is the field the current metric values are generated from. In this example we also apply a regex filter and enable the `kubernetes_mode` option:
+```python
+[FILTER]
+ name log_to_metrics
+ match dummy.log*
+ tag test_metric
+ metric_mode histogram
+ metric_name current_duration
+ metric_description This metric shows the request duration
+ value_field duration
+ kubernetes_mode on
+ regex message .*el.*
+ add_label app $kubernetes['labels']['app']
+ label_field color
+ label_field shape
+```
+You can then use, for example, the `curl` command to retrieve the generated metric:
+```text
+> curl -s http://127.0.0.1:2021/metrics
+
+
+# HELP log_metric_histogram_current_duration This metric shows the request duration
+# TYPE log_metric_histogram_current_duration histogram
+log_metric_histogram_current_duration_bucket{le="0.005",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="red",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="0.01",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="red",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="0.025",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="red",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="0.05",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="red",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="0.1",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="red",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="0.25",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="red",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="0.5",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="red",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="1.0",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="red",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="2.5",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="red",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="5.0",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="red",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="10.0",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="red",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="+Inf",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="red",shape="circle"} 28
+log_metric_histogram_current_duration_sum{namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="red",shape="circle"} 560
+log_metric_histogram_current_duration_count{namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="red",shape="circle"} 28
+log_metric_histogram_current_duration_bucket{le="0.005",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="blue",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="0.01",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="blue",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="0.025",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="blue",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="0.05",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="blue",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="0.1",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="blue",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="0.25",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="blue",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="0.5",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="blue",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="1.0",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="blue",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="2.5",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="blue",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="5.0",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="blue",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="10.0",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="blue",shape="circle"} 0
+log_metric_histogram_current_duration_bucket{le="+Inf",namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="blue",shape="circle"} 27
+log_metric_histogram_current_duration_sum{namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="blue",shape="circle"} 1620
+log_metric_histogram_current_duration_count{namespace_name="default",pod_name="pod1",container_name="mycontainer",docker_id="abc123",pod_id="def456",app="app1",color="blue",shape="circle"} 27
+```
+
+As you can see in the output, by default there are the buckets `0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0` and `+Inf`, into which values are sorted. A sum and a counter are also part of this metric. You can specify your own buckets in the config, as in the following example:
+
+```python
+[FILTER]
+ name log_to_metrics
+ match dummy.log*
+ tag test_metric
+ metric_mode histogram
+ metric_name current_duration
+ metric_description This metric shows the HTTP request duration as histogram in milliseconds
+ value_field duration
+ kubernetes_mode on
+ bucket 1
+ bucket 5
+ bucket 10
+ bucket 50
+ bucket 100
+ bucket 250
+ bucket 500
+ bucket 1000
+ regex message .*el.*
+ label_field color
+ label_field shape
+```
+
+Please note that the `+Inf` bucket will always be included implicitly. The buckets in a histogram are cumulative, so a value added to one bucket will add to all larger buckets, too.
+
+
+You can also see that all the Kubernetes labels have been attached to the metric, identical to the behavior of `label_field` described in [the previous chapter](#metric-label_values). That results in two sets of metrics for the histogram.
\ No newline at end of file
diff --git a/pipeline/filters/lua.md b/pipeline/filters/lua.md
index d20e6cf04..2ef8e9872 100644
--- a/pipeline/filters/lua.md
+++ b/pipeline/filters/lua.md
@@ -20,6 +20,7 @@ The plugin supports the following configuration parameters:
| protected\_mode | If enabled, Lua script will be executed in protected mode. It prevents Fluent Bit from crashing when invalid Lua script is executed or the triggered Lua function throws exceptions. Default is true. |
| time\_as\_table | By default when the Lua script is invoked, the record timestamp is passed as a *floating number* which might lead to precision loss when it is converted back. If you desire timestamp precision, enabling this option will pass the timestamp as a Lua table with keys `sec` for seconds since epoch and `nsec` for nanoseconds. |
| code | Inline LUA code instead of loading from a path via `script`. |
+| enable\_flb\_null | If enabled, null will be converted to `flb_null` in Lua. This is useful to prevent the removal of key/value pairs, since `nil` is a special value that removes a key from a map in Lua. Default is false. |
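+
+As a brief, hedged sketch of `enable_flb_null` (the script name and function are illustrative), the option is set alongside the usual Lua filter properties:
+
+```python
+[FILTER]
+    Name            lua
+    Match           *
+    script          test.lua
+    call            cb_filter
+    enable_flb_null true
+```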
## Getting Started
@@ -108,24 +109,24 @@ service:
pipeline:
inputs:
- - random:
- tag: test
- samples: 10
+ - name: random
+ tag: test
+ samples: 10
filters:
- - lua:
- match: "*"
- call: append_tag
- code: |
- function append_tag(tag, timestamp, record)
- new_record = record
- new_record["tag"] = tag
- return 1, timestamp, new_record
- end
+ - name: lua
+ match: "*"
+ call: append_tag
+ code: |
+ function append_tag(tag, timestamp, record)
+ new_record = record
+ new_record["tag"] = tag
+ return 1, timestamp, new_record
+ end
outputs:
- - stdout:
- match: "*"
+ - name: stdout
+ match: "*"
```
In classic mode:
diff --git a/pipeline/filters/multiline-stacktrace.md b/pipeline/filters/multiline-stacktrace.md
index f6f80e446..1db4a242d 100644
--- a/pipeline/filters/multiline-stacktrace.md
+++ b/pipeline/filters/multiline-stacktrace.md
@@ -110,7 +110,7 @@ This second file defines a multiline parser for the example. Note that a second
#
# rules | state name | regex pattern | next state
# ------|---------------|--------------------------------------------
- rule "start_state" "/(Dec \d+ \d+\:\d+\:\d+)(.*)/" "cont"
+ rule "start_state" "/([A-Za-z]+ \d+ \d+\:\d+\:\d+)(.*)/" "cont"
rule "cont" "/^\s+at.*/" "cont"
```
diff --git a/pipeline/filters/parser.md b/pipeline/filters/parser.md
index 14372ad0c..b0ff976b1 100644
--- a/pipeline/filters/parser.md
+++ b/pipeline/filters/parser.md
@@ -138,19 +138,21 @@ If you enable `Reserved_Data` and `Preserve_Key`, the original key field will be
Parser dummy_test
Reserve_Data On
Preserve_Key On
+
+[OUTPUT]
+ Name stdout
+ Match *
```
This will produce the following output:
```text
$ fluent-bit -c dummy.conf
-Fluent-Bit v0.12.0
-Copyright (C) Treasure Data
-
-[2017/07/06 22:33:12] [ info] [engine] started
-[0] dummy.data: [1499347993.001371317, {"data":"100 0.5 true This is example", "INT"=>"100", "FLOAT"=>"0.5", "BOOL"=>"true", "STRING"=>"This is example"}]
-[1] dummy.data: [1499347994.001303118, {"data":"100 0.5 true This is example", "INT"=>"100", "FLOAT"=>"0.5", "BOOL"=>"true", "STRING"=>"This is example"}]
-[2] dummy.data: [1499347995.001296133, {"data":"100 0.5 true This is example", "INT"=>"100", "FLOAT"=>"0.5", "BOOL"=>"true", "STRING"=>"This is example"}]
-[3] dummy.data: [1499347996.001320284, {"data":"100 0.5 true This is example", "INT"=>"100", "FLOAT"=>"0.5", "BOOL"=>"true", "STRING"=>"This is example"}]
+Fluent Bit v2.1.1
+* Copyright (C) 2015-2022 The Fluent Bit Authors
+...
+...
+[0] dummy.data: [[1687122778.299116136, {}], {"INT"=>"100", "FLOAT"=>"0.5", "BOOL"=>"true", "STRING"=>"This is example", "data"=>"100 0.5 true This is example", "key1"=>"value1", "key2"=>"value2"}]
+[0] dummy.data: [[1687122779.296906553, {}], {"INT"=>"100", "FLOAT"=>"0.5", "BOOL"=>"true", "STRING"=>"This is example", "data"=>"100 0.5 true This is example", "key1"=>"value1", "key2"=>"value2"}]
+[0] dummy.data: [[1687122780.297475803, {}], {"INT"=>"100", "FLOAT"=>"0.5", "BOOL"=>"true", "STRING"=>"This is example", "data"=>"100 0.5 true This is example", "key1"=>"value1", "key2"=>"value2"}]
```
-
diff --git a/pipeline/filters/sysinfo.md b/pipeline/filters/sysinfo.md
new file mode 100644
index 000000000..45936ac1b
--- /dev/null
+++ b/pipeline/filters/sysinfo.md
@@ -0,0 +1,50 @@
+# Sysinfo
+
+The _Sysinfo Filter_ plugin allows you to append system information such as the Fluent Bit version or the hostname.
+
+## Configuration Parameters
+
+The plugin supports the following configuration parameters:
+
+|Key|Description|Supported platform|
+|---|---|---|
+|fluentbit_version_key|Specify the key name for the fluent-bit version.| All |
+|os_name_key|Specify the key name for the OS name, e.g. linux, win64 or macos.| All |
+|hostname_key|Specify the key name for the hostname.| All |
+|os_version_key|Specify the key name for the OS version. It is not supported on some platforms.| Linux |
+|kernel_version_key|Specify the key name for the kernel version. It is not supported on some platforms.| Linux |
+
+Some properties are only supported on specific platforms.
+
+## Getting Started
+
+In order to start filtering records, you can run the filter from the command line or through the configuration file.
+
+The following configuration file appends the Fluent Bit version and OS name.
+
+```
+[INPUT]
+ Name dummy
+ Tag test
+
+[FILTER]
+ Name sysinfo
+ Match *
+ Fluentbit_version_key flb_ver
+ Os_name_key os_name
+
+[OUTPUT]
+ name stdout
+ match *
+```
+
+You can also run the filter from the command line.
+
+```
+fluent-bit -i dummy -o stdout -F sysinfo -m '*' -p fluentbit_version_key=flb_ver -p os_name_key=os_name
+```
+
+The output will be:
+```
+[0] dummy.0: [[1699172858.989654355, {}], {"message"=>"dummy", "flb_ver"=>"2.2.0", "os_name"=>"linux"}]
+```
\ No newline at end of file
diff --git a/pipeline/inputs/dummy.md b/pipeline/inputs/dummy.md
index 8bf903300..bceb24a85 100644
--- a/pipeline/inputs/dummy.md
+++ b/pipeline/inputs/dummy.md
@@ -9,9 +9,12 @@ The plugin supports the following configuration parameters:
| Key | Description |
| :--- | :--- |
| Dummy | Dummy JSON record. Default: `{"message":"dummy"}` |
+| Metadata | Dummy JSON metadata. Default: `{}` |
| Start\_time\_sec | Dummy base timestamp in seconds. Default: 0 |
| Start\_time\_nsec | Dummy base timestamp in nanoseconds. Default: 0 |
| Rate | Rate at which messages are generated expressed in how many times per second. Default: 1 |
+| Interval\_sec | Set the time interval, in seconds, at which every message is generated. If set, the `Rate` configuration will be ignored. Default: 0 |
+| Interval\_nsec | Set the time interval, in nanoseconds, at which every message is generated. If set, the `Rate` configuration will be ignored. Default: 0 |
| Samples | If set, the events number will be limited. e.g. If Samples=3, the plugin only generates three events and stops. |
| Copies | Number of messages to generate each time they are generated. Defaults to 1. |
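+
+As a quick, hedged example of the interval options (values are illustrative), the following emits one record every 2 seconds instead of using `Rate`:
+
+```bash
+$ fluent-bit -i dummy -p interval_sec=2 -p interval_nsec=0 -o stdout
+```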
@@ -23,30 +26,42 @@ You can run the plugin from the command line or through the configuration file:
```bash
$ fluent-bit -i dummy -o stdout
-Fluent Bit v1.x.x
-* Copyright (C) 2019-2020 The Fluent Bit Authors
-* Copyright (C) 2015-2018 Treasure Data
+Fluent Bit v2.x.x
+* Copyright (C) 2015-2022 The Fluent Bit Authors
* Fluent Bit is a CNCF sub-project under the umbrella of Fluentd
* https://fluentbit.io
-[2017/07/06 21:55:29] [ info] [engine] started
-[0] dummy.0: [1499345730.015265366, {"message"=>"dummy"}]
-[1] dummy.0: [1499345731.002371371, {"message"=>"dummy"}]
-[2] dummy.0: [1499345732.000267932, {"message"=>"dummy"}]
-[3] dummy.0: [1499345733.000757746, {"message"=>"dummy"}]
+[0] dummy.0: [[1686451466.659962491, {}], {"message"=>"dummy"}]
+[0] dummy.0: [[1686451467.659679509, {}], {"message"=>"dummy"}]
```
### Configuration File
In your main configuration file append the following _Input_ & _Output_ sections:
-```python
+
+{% tabs %}
+{% tab title="fluent-bit.conf" %}
+```text
[INPUT]
Name dummy
- Tag dummy.log
+ Dummy {"message": "custom dummy"}
[OUTPUT]
Name stdout
Match *
```
+{% endtab %}
+{% tab title="fluent-bit.yaml" %}
+```yaml
+pipeline:
+ inputs:
+ - name: dummy
+ dummy: '{"message": "custom dummy"}'
+ outputs:
+ - name: stdout
+ match: '*'
+```
+{% endtab %}
+{% endtabs %}
diff --git a/pipeline/inputs/elasticsearch.md b/pipeline/inputs/elasticsearch.md
new file mode 100644
index 000000000..32eefd13e
--- /dev/null
+++ b/pipeline/inputs/elasticsearch.md
@@ -0,0 +1,89 @@
+# Elasticsearch (Bulk API)
+
+The **elasticsearch** input plugin handles both Elasticsearch and OpenSearch Bulk API requests.
+
+## Configuration Parameters
+
+The plugin supports the following configuration parameters:
+
+| Key | Description | Default value |
+| :--- | :--- | :--- |
+| buffer\_max\_size | Set the maximum size of the buffer. | 4M |
+| buffer\_chunk\_size | Set the buffer chunk size. | 512K |
+| tag\_key | Specify a key name for extracting as a tag. | `NULL` |
+| meta\_key | Specify a key name for meta information. | "@meta" |
+| hostname | Specify hostname or FQDN. This parameter can be used for "sniffing" (auto-discovery of) cluster node information. | "localhost" |
+| version | Specify the Elasticsearch server version to report. This parameter is effective when clients check the version of the Elasticsearch/OpenSearch server. | "8.0.0" |
+
+**Note:** The Elasticsearch cluster uses "sniffing" to optimize the connections between the cluster and its clients:
+Elasticsearch builds its cluster and dynamically generates a list of connectable nodes, which is called "sniffing".
+The `hostname` will be used for this sniffing information, which is served by the plugin's sniffing endpoint.
+
+## Getting Started
+
+In order to start performing the checks, you can run the plugin from the command line or through the configuration file:
+
+### Command Line
+
+From the command line you can configure Fluent Bit to handle Bulk API requests with the following options:
+
+```bash
+$ fluent-bit -i elasticsearch -p port=9200 -o stdout
+```
+
+### Configuration File
+
+In your main configuration file append the following _Input_ & _Output_ sections:
+
+```python
+[INPUT]
+ name elasticsearch
+ listen 0.0.0.0
+ port 9200
+
+[OUTPUT]
+ name stdout
+ match *
+```
+
+As described above, the plugin will handle ingested Bulk API requests.
+For large bulk ingestions, you may have to increase buffer size with **buffer_max_size** and **buffer_chunk_size** parameters:
+
+```python
+[INPUT]
+ name elasticsearch
+ listen 0.0.0.0
+ port 9200
+ buffer_max_size 20M
+ buffer_chunk_size 5M
+
+[OUTPUT]
+ name stdout
+ match *
+```
+
+#### Ingesting from beats series
+
+Ingesting from agents of the beats series is also supported.
+For example, [Filebeat](https://www.elastic.co/beats/filebeat), [Metricbeat](https://www.elastic.co/beats/metricbeat), and [Winlogbeat](https://www.elastic.co/beats/winlogbeat) are able to ingest their collected data through this plugin.
+
+Note that Fluent Bit reports its node information as Elasticsearch 8.0.0.
+
+So, users have to specify the following settings in their beats configurations:
+
+```yaml
+output.elasticsearch:
+ allow_older_versions: true
+ ilm: false
+```
+
+For large log ingestion on these beat plugins, users might have to configure rate limiting on the beats side when Fluent Bit indicates that the application is exceeding the size limit for HTTP requests:
+
+
+```yaml
+processors:
+ - rate_limit:
+ limit: "200/s"
+```
diff --git a/pipeline/inputs/exec.md b/pipeline/inputs/exec.md
index 222f6461d..63d2edb54 100644
--- a/pipeline/inputs/exec.md
+++ b/pipeline/inputs/exec.md
@@ -2,9 +2,13 @@
The **exec** input plugin allows you to execute external programs and collect event logs.
+**WARNING**: Because this plugin invokes commands via a shell, its inputs are
+subject to shell metacharacter substitution. Careless use of untrusted input in
+command arguments could lead to malicious command execution.
+
## Container support
-This plugin will not function in the distroless production images (AMD64 currently) as it needs a functional `/bin/sh` which is not present.
+This plugin will not function in any of the distroless production images as it needs a functional `/bin/sh`, which is not present.
The debug images use the same binaries so even though they have a shell, there is no support for this plugin as it is compiled out.
## Configuration Parameters
@@ -13,12 +17,14 @@ The plugin supports the following configuration parameters:
| Key | Description |
| :--- | :--- |
-| Command | The command to execute. |
+| Command | The command to execute, passed to [popen(...)](https://man7.org/linux/man-pages/man3/popen.3.html) without any additional escaping or processing. May include pipelines, redirection, command-substitution, etc. |
| Parser | Specify the name of a parser to interpret the entry as a structured message. |
| Interval\_Sec | Polling interval \(seconds\). |
| Interval\_NSec | Polling interval \(nanosecond\). |
| Buf\_Size | Size of the buffer \(check [unit sizes](https://docs.fluentbit.io/manual/configuration/unit_sizes) for allowed values\) |
| Oneshot | Only run once at startup. This allows collection of data precedent to fluent-bit's startup (bool, default: false) |
+| Exit\_After\_Oneshot | Exit as soon as the one-shot command exits. This allows the exec plugin to be used as a wrapper for another command, sending the target command's output to any fluent-bit sink(s) then exiting. (bool, default: false) |
+| Propagate\_Exit\_Code | When exiting due to Exit\_After\_Oneshot, cause fluent-bit to exit with the exit code of the command exited by this plugin. Follows [shell conventions for exit code propagation](https://www.gnu.org/software/bash/manual/html_node/Exit-Status.html). (bool, default: false) |
## Getting Started
@@ -64,3 +70,83 @@ In your main configuration file append the following _Input_ & _Output_ sections
Name stdout
Match *
```
+
+## Use as a command wrapper
+
+To use `fluent-bit` with the `exec` plugin to wrap another command, use the
+`Exit_After_Oneshot` and `Propagate_Exit_Code` options, e.g.:
+
+```
+[INPUT]
+ Name exec
+ Tag exec_oneshot_demo
+ Command for s in $(seq 1 10); do echo "count: $s"; sleep 1; done; exit 1
+ Oneshot true
+ Exit_After_Oneshot true
+ Propagate_Exit_Code true
+
+[OUTPUT]
+ Name stdout
+ Match *
+```
+
+`fluent-bit` will output
+
+```
+[0] exec_oneshot_demo: [[1681702172.950574027, {}], {"exec"=>"count: 1"}]
+[1] exec_oneshot_demo: [[1681702173.951663666, {}], {"exec"=>"count: 2"}]
+[2] exec_oneshot_demo: [[1681702174.953873724, {}], {"exec"=>"count: 3"}]
+[3] exec_oneshot_demo: [[1681702175.955760865, {}], {"exec"=>"count: 4"}]
+[4] exec_oneshot_demo: [[1681702176.956840282, {}], {"exec"=>"count: 5"}]
+[5] exec_oneshot_demo: [[1681702177.958292246, {}], {"exec"=>"count: 6"}]
+[6] exec_oneshot_demo: [[1681702178.959508200, {}], {"exec"=>"count: 7"}]
+[7] exec_oneshot_demo: [[1681702179.961715745, {}], {"exec"=>"count: 8"}]
+[8] exec_oneshot_demo: [[1681702180.963924140, {}], {"exec"=>"count: 9"}]
+[9] exec_oneshot_demo: [[1681702181.965852990, {}], {"exec"=>"count: 10"}]
+```
+
+then exit with exit code 1.
+
+Translation of command exit code(s) to `fluent-bit` exit code follows
+[the usual shell rules for exit code handling](https://www.gnu.org/software/bash/manual/html_node/Exit-Status.html).
+Like with a shell, there is no way to differentiate between the command exiting
+on a signal and the shell exiting on a signal, and no way to differentiate
+between normal exits with codes greater than 125 and abnormal or signal exits
+reported by fluent-bit or the shell. Wrapped commands should use exit codes
+between 0 and 125 inclusive to allow reliable identification of normal exit.
+If the command is a pipeline, the exit code will be the exit code of the last
+command in the pipeline unless overridden by shell options.
+
+### Parsing command output
+
+By default the `exec` plugin emits one message per command output line, with a
+single field `exec` containing the full message. Use the `Parser` directive to
+specify the name of a parser configuration to use to process the command input.
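+
+For example (a sketch; it assumes a `json` parser is defined in the loaded parsers file):
+
+```
+[INPUT]
+    Name    exec
+    Tag     exec_json
+    Command echo '{"status":"ok","count":42}'
+    Parser  json
+
+[OUTPUT]
+    Name  stdout
+    Match *
+```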
+
+### Security concerns
+
+**Take great care with shell quoting and escaping when wrapping commands**.
+A script like
+
+```bash
+#!/bin/bash
+# This is a DANGEROUS example of what NOT to do, NEVER DO THIS
+exec fluent-bit \
+ -o stdout \
+ -i exec \
+ -p exit_after_oneshot=true \
+ -p propagate_exit_code=true \
+ -p command='myscript $*'
+```
+
+can ruin your day if someone passes it the argument
+`$(rm -rf /my/important/files; echo "deleted your stuff!")`.
+
+The above script would be safer if written with:
+
+```bash
+ -p command='echo '"$(printf '%q' "$@")" \
+```
+
+... but it's generally best to avoid dynamically generating the command or
+handling untrusted arguments to it at all.
diff --git a/pipeline/inputs/forward.md b/pipeline/inputs/forward.md
index 7d81b4bb2..80eaa3502 100644
--- a/pipeline/inputs/forward.md
+++ b/pipeline/inputs/forward.md
@@ -16,7 +16,7 @@ The plugin supports the following configuration parameters:
| Buffer\_Max\_Size | Specify the maximum buffer memory size used to receive a Forward message. The value must be according to the [Unit Size](../../administration/configuring-fluent-bit/unit-sizes.md) specification. | 6144000 |
| Buffer\_Chunk\_Size | By default the buffer to store the incoming Forward messages, do not allocate the maximum memory allowed, instead it allocate memory when is required. The rounds of allocations are set by _Buffer\_Chunk\_Size_. The value must be according to the [Unit Size ](../../administration/configuring-fluent-bit/unit-sizes.md)specification. | 1024000 |
| Tag_Prefix | Prefix incoming tag with the defined value.| |
-
+| Tag | Override the tag of the forwarded events with the defined value.| |
## Getting Started
diff --git a/pipeline/inputs/http.md b/pipeline/inputs/http.md
index 410fa41b2..aead92d9d 100644
--- a/pipeline/inputs/http.md
+++ b/pipeline/inputs/http.md
@@ -6,14 +6,19 @@ description: The HTTP input plugin allows you to send custom records to an HTTP
## Configuration Parameters
-| **Key** | Description | default |
-| ----------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
-| listen | The address to listen on | 0.0.0.0 |
-| port | The port for Fluent Bit to listen on | 9880 |
-| tag_key | Specify the key name to overwrite a tag. If set, the tag will be overwritten by a value of the key. | |
-| buffer_max_size | Specify the maximum buffer size in KB to receive a JSON message. | 4M |
-| buffer_chunk_size | This sets the chunk size for incoming incoming JSON messages. These chunks are then stored/managed in the space available by buffer_max_size. | 512K |
-|successful_response_code | It allows to set successful response code. `200`, `201` and `204` are supported.| 201 |
+| **Key** | Description | default |
+|--------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------|---------|
+| listen | The address to listen on | 0.0.0.0 |
+| port | The port for Fluent Bit to listen on | 9880 |
+| tag_key | Specify the key name to overwrite a tag. If set, the tag will be overwritten by a value of the key. | |
+| buffer_max_size | Specify the maximum buffer size in KB to receive a JSON message. | 4M |
+| buffer_chunk_size        | This sets the chunk size for incoming JSON messages. These chunks are then stored/managed in the space available by buffer_max_size.           | 512K    |
+| successful_response_code | It allows to set successful response code. `200`, `201` and `204` are supported. | 201 |
+| success_header | Add an HTTP header key/value pair on success. Multiple headers can be set. Example: `X-Custom custom-answer` | |
+
+### TLS / SSL
+
+The HTTP input plugin supports TLS/SSL. For more details about the properties available and general configuration, please refer to the [Transport Security](../../administration/transport-security.md) section.
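+
+A minimal sketch, assuming a certificate and key pair at the given (hypothetical) paths:
+
+```
+[INPUT]
+    name         http
+    listen       0.0.0.0
+    port         9880
+    tls          on
+    tls.crt_file /path/to/cert.pem
+    tls.key_file /path/to/key.pem
+```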
## Getting Started
@@ -21,11 +26,90 @@ The http input plugin allows Fluent Bit to open up an HTTP port that you can the
[Link to video](https://asciinema.org/a/375571)
-**How to set tag**
+#### How to set tag
+
+The tag for the HTTP input plugin is set by adding the tag to the end of the request URL. This tag is then used to route the event through the system.
+For example, in the following curl message below the tag set is `app.log` because the ending path is `/app.log`:
+
+### Curl request
+
+```
+curl -d '{"key1":"value1","key2":"value2"}' -XPOST -H "content-type: application/json" http://localhost:8888/app.log
+```
+
+### Configuration File
+
+```
+[INPUT]
+ name http
+ listen 0.0.0.0
+ port 8888
+
+[OUTPUT]
+ name stdout
+ match app.log
+```
+
+If you do not set the tag, `http.0` is automatically used. If you have multiple HTTP inputs then they will follow a pattern of `http.N`, where N is an integer representing the input.
+
+### Curl request
+
+```
+curl -d '{"key1":"value1","key2":"value2"}' -XPOST -H "content-type: application/json" http://localhost:8888
+```
+
+### Configuration File
+
+```
+[INPUT]
+ name http
+ listen 0.0.0.0
+ port 8888
+
+[OUTPUT]
+ name stdout
+ match http.0
+```
+
+
+#### How to set tag_key
+
+The `tag_key` configuration option allows you to specify the key name that will be used to overwrite the tag. The tag's value will be replaced with the value associated with the specified key. For example, if `tag_key` is set to `custom_tag` and the log event contains a JSON field with the key `custom_tag`, Fluent Bit will use the value of that field as the new tag for routing the event through the system.
+
+### Curl request
-The tag for the HTTP input plugin is set by adding the tag to the end of the request URL. This tag is then used to route the event through the system. For example, in the following curl message below the tag set is `app.log`**. **If you do not set the tag `http.0` is automatically used. If you have multiple HTTP inputs then they will follow a pattern of `http.N` where N is an integer representing the input.
+```
+curl -d '{"key1":"value1","key2":"value2"}' -XPOST -H "content-type: application/json" http://localhost:8888/app.log
+```
-**Example Curl message**
+### Configuration File
+
+```
+[INPUT]
+ name http
+ listen 0.0.0.0
+ port 8888
+ tag_key key1
+
+[OUTPUT]
+ name stdout
+ match value1
+```
+
+
+#### How to set multiple custom HTTP header on success
+
+The `success_header` parameter allows you to set multiple HTTP headers on success. The format is:
+
+```ini
+[INPUT]
+ name http
+ success_header X-Custom custom-answer
+ success_header X-Another another-answer
+```
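+
+You can then observe the headers in the response, for example with `curl -i` (assuming the default port and the configuration above):
+
+```
+curl -i -d '{"key1":"value1"}' -XPOST -H "content-type: application/json" http://localhost:9880
+```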
+
+
+#### Example Curl message
```
curl -d @app.log -XPOST -H "content-type: application/json" http://localhost:8888/app.log
@@ -49,5 +133,3 @@ curl -d @app.log -XPOST -H "content-type: application/json" http://localhost:888
```
$> fluent-bit -i http -p port=8888 -o stdout
```
-
-####
diff --git a/pipeline/inputs/kafka.md b/pipeline/inputs/kafka.md
new file mode 100644
index 000000000..aac51620f
--- /dev/null
+++ b/pipeline/inputs/kafka.md
@@ -0,0 +1,71 @@
+# Kafka
+
+The Kafka input plugin allows subscribing to one or more Kafka topics to collect messages from an [Apache Kafka](https://kafka.apache.org/) service.
+This plugin uses the official [librdkafka C library](https://github.com/edenhill/librdkafka) \(built-in dependency\).
+
+## Configuration Parameters
+
+| Key | Description | default |
+| :--- | :--- | :--- |
+| brokers | Single or multiple list of Kafka Brokers, e.g: 192.168.1.3:9092, 192.168.1.4:9092. | |
+| topics | Single entry or list of topics separated by comma \(,\) that Fluent Bit will subscribe to. | |
+| client\_id | Client id passed to librdkafka. | |
+| group\_id | Group id passed to librdkafka. | fluent-bit |
+| poll\_ms | Kafka brokers polling interval in milliseconds. | 500 |
+| rdkafka.{property} | `{property}` can be any [librdkafka properties](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md) | |
+
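+For example (a hedged sketch; `enable.auto.commit` is one of many librdkafka properties), arbitrary librdkafka settings can be passed through the `rdkafka.` prefix:
+
+```text
+[INPUT]
+    Name                       kafka
+    Brokers                    192.168.1.3:9092
+    Topics                     some-topic
+    rdkafka.enable.auto.commit false
+```
+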
+## Getting Started
+
+In order to subscribe/collect messages from Apache Kafka, you can run the plugin from the command line or through the configuration file:
+
+### Command Line
+
+The **kafka** plugin can read parameters through the **-p** argument \(property\), e.g:
+
+```text
+$ fluent-bit -i kafka -o stdout -p brokers=192.168.1.3:9092 -p topics=some-topic
+```
+
+### Configuration File
+
+In your main configuration file append the following _Input_ & _Output_ sections:
+
+```text
+[INPUT]
+ Name kafka
+ Brokers 192.168.1.3:9092
+ Topics some-topic
+ poll_ms 100
+
+[OUTPUT]
+ Name stdout
+```
+
+#### Example of using kafka input/output plugins
+
+The fluent-bit source repository contains a full example of using fluent-bit to process kafka records:
+
+```text
+[INPUT]
+ Name kafka
+ brokers kafka-broker:9092
+ topics fb-source
+ poll_ms 100
+
+[FILTER]
+ Name lua
+ Match *
+ script kafka.lua
+ call modify_kafka_message
+
+[OUTPUT]
+ Name kafka
+ brokers kafka-broker:9092
+ topics fb-sink
+```
+
+The above will connect to the broker listening on `kafka-broker:9092` and subscribe to the `fb-source` topic, polling for new messages every 100 milliseconds.
+
+Every message received is then processed with `kafka.lua` and sent back to the `fb-sink` topic of the same broker.
+
+The example can be executed locally with `make start` in the `examples/kafka_filter` directory (docker/compose is used).
diff --git a/pipeline/inputs/kubernetes-events.md b/pipeline/inputs/kubernetes-events.md
new file mode 100644
index 000000000..93b33695f
--- /dev/null
+++ b/pipeline/inputs/kubernetes-events.md
@@ -0,0 +1,50 @@
+---
+description: >-
+ Collects Kubernetes Events
+---
+
+# Kubernetes Events
+
+Kubernetes exports its events through the API server. This input plugin allows you to retrieve those events as logs and have them processed through the pipeline.
+
+## Configuration
+
+
+| Key | Description | Default |
+|---------------------|---------------------------------------------------------------------------------------|------------------------------------------------------|
+| db | Set a database file to keep track of recorded Kubernetes events | |
+| db.sync             | Set a database sync method. Values: extra, full, normal and off.                       | normal                                               |
+| interval_sec | Set the polling interval for each channel. | 0 |
+| interval_nsec | Set the polling interval for each channel (sub seconds: nanoseconds) | 500000000 |
+| kube_url | API Server end-point | https://kubernetes.default.svc |
+| kube_ca_file | Kubernetes TLS CA file | /var/run/secrets/kubernetes.io/serviceaccount/ca.crt |
+| kube_ca_path | Kubernetes TLS ca path | |
+| kube_token_file | Kubernetes authorization token file. | /var/run/secrets/kubernetes.io/serviceaccount/token |
+| kube_token_ttl      | Kubernetes token TTL, until it is reread from the token file.                           | 10m                                                  |
+| kube_request_limit  | Kubernetes limit parameter for events query; no limit is applied when set to 0.        | 0                                                    |
+| kube_retention_time | Kubernetes retention time for events. | 1h |
+| kube_namespace | Kubernetes namespace to query events from. Gets events from all namespaces by default | |
+| tls.debug | Debug level between 0 (nothing) and 4 (every detail). | 0 |
+| tls.verify | Enable or disable verification of TLS peer certificate. | On |
+| tls.vhost | Set optional TLS virtual host. | |
+
+## Getting Started
+
+### Simple Configuration File
+
+In the following configuration file, the input plugin *kubernetes_events* collects events every 500 milliseconds (the default *interval_nsec* of 500000000) and exposes them through the [standard output plugin](../outputs/standard-output.md) on the console.
+
+```text
+[SERVICE]
+    flush     1
+    log_level info
+
+[INPUT]
+    name     kubernetes_events
+    tag      k8s_events
+    kube_url https://kubernetes.default.svc
+
+[OUTPUT]
+    name  stdout
+    match *
+```
diff --git a/pipeline/inputs/node-exporter-metrics.md b/pipeline/inputs/node-exporter-metrics.md
index 073705967..15437cad3 100644
--- a/pipeline/inputs/node-exporter-metrics.md
+++ b/pipeline/inputs/node-exporter-metrics.md
@@ -15,13 +15,43 @@ The initial release of Node Exporter Metrics contains a subset of collectors and
This plugin is currently only supported on Linux-based operating systems.
-## Configuration
+## Configuration
| Key | Description | Default |
| --------------- | ---------------------------------------------------------------------- | --------- |
| scrape_interval | The rate at which metrics are collected from the host operating system | 5 seconds |
| path.procfs | The mount point used to collect process information and metrics | /proc/ |
| path.sysfs | The path in the filesystem used to collect system metrics | /sys/ |
+| collector.cpu.scrape\_interval | The rate in seconds at which cpu metrics are collected from the host operating system. A value greater than 0 overrides the global default; otherwise the global default is used. | 0 seconds |
+| collector.cpufreq.scrape\_interval | The rate in seconds at which cpufreq metrics are collected from the host operating system. A value greater than 0 overrides the global default; otherwise the global default is used. | 0 seconds |
+| collector.meminfo.scrape\_interval | The rate in seconds at which meminfo metrics are collected from the host operating system. A value greater than 0 overrides the global default; otherwise the global default is used. | 0 seconds |
+| collector.diskstats.scrape\_interval | The rate in seconds at which diskstats metrics are collected from the host operating system. A value greater than 0 overrides the global default; otherwise the global default is used. | 0 seconds |
+| collector.filesystem.scrape\_interval | The rate in seconds at which filesystem metrics are collected from the host operating system. A value greater than 0 overrides the global default; otherwise the global default is used. | 0 seconds |
+| collector.uname.scrape\_interval | The rate in seconds at which uname metrics are collected from the host operating system. A value greater than 0 overrides the global default; otherwise the global default is used. | 0 seconds |
+| collector.stat.scrape\_interval | The rate in seconds at which stat metrics are collected from the host operating system. A value greater than 0 overrides the global default; otherwise the global default is used. | 0 seconds |
+| collector.time.scrape\_interval | The rate in seconds at which time metrics are collected from the host operating system. A value greater than 0 overrides the global default; otherwise the global default is used. | 0 seconds |
+| collector.loadavg.scrape\_interval | The rate in seconds at which loadavg metrics are collected from the host operating system. A value greater than 0 overrides the global default; otherwise the global default is used. | 0 seconds |
+| collector.vmstat.scrape\_interval | The rate in seconds at which vmstat metrics are collected from the host operating system. A value greater than 0 overrides the global default; otherwise the global default is used. | 0 seconds |
+| collector.filefd.scrape\_interval | The rate in seconds at which filefd metrics are collected from the host operating system. A value greater than 0 overrides the global default; otherwise the global default is used. | 0 seconds |
+| collector.nvme.scrape\_interval | The rate in seconds at which nvme metrics are collected from the host operating system. A value greater than 0 overrides the global default; otherwise the global default is used. | 0 seconds |
+| metrics | Specifies which metrics are collected from the host operating system. These metrics depend on the `/proc` or `/sys` filesystems; the actual metric values are read from `/proc` or `/sys` when needed. The cpu, meminfo, diskstats, filesystem, stat, loadavg, vmstat, netdev, and filefd metrics depend on procfs; cpufreq metrics depend on sysfs. | `"cpu,cpufreq,meminfo,diskstats,filesystem,uname,stat,time,loadavg,vmstat,netdev,filefd"` |
+| filesystem.ignore\_mount\_point\_regex | Specify the regex for mount points to ignore; no metrics are collected for matching mount points. | `^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+|var/lib/containers/storage/.+)($|/)` |
+| filesystem.ignore\_filesystem\_type\_regex | Specify the regex for filesystem types to ignore; no metrics are collected for matching types. | `^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$` |
+| diskstats.ignore\_device\_regex | Specify the regex for disk devices to ignore; no diskstats are collected for matching devices. | `^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\\d+n\\d+p)\\d+$` |
+| systemd_service_restart_metrics | Determines if the collector will include service restart metrics. | false |
+| systemd_unit_start_time_metrics | Determines if the collector will include unit start time metrics. | false |
+| systemd_include_service_task_metrics | Determines if the collector will include service task metrics. | false |
+| systemd_include_pattern | Regex to determine which units are included in the metrics produced by the systemd collector. | Not applied unless explicitly set |
+| systemd_exclude_pattern | Regex to determine which units are excluded from the metrics produced by the systemd collector. | `.+\\.(automount|device|mount|scope|slice)` |
+
+
+**Note:** The plugin's top-level `scrape_interval` setting is the global default; each `collector.xxx.scrape_interval` option overrides the scraping interval for that specific collector only and updates its associated set of metrics.
+
+An overridden interval changes only the collection interval, not the publishing interval, which is always taken from the global setting.
+For example, if the global interval is set to 5s and a collector override of 60s is used, the metrics are still published every 5s, but the values for that specific collector stay the same for 60s until it is scraped again.
+This feature aims to help with down-sampling when collecting metrics.
+
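+For example, a minimal sketch (interval values are illustrative) that keeps the global 5-second default but down-samples diskstats to once a minute:
+
+```
+[INPUT]
+    name                                node_exporter_metrics
+    scrape_interval                     5
+    collector.diskstats.scrape_interval 60
+```
+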
## Collectors available
@@ -42,6 +72,8 @@ The following table describes the available collectors as part of this plugin. A
| time | Exposes the current system time. | Linux | v1.8 |
| uname | Exposes system information as provided by the uname system call. | Linux | v1.8 |
| vmstat | Exposes statistics from `/proc/vmstat`. | Linux | v1.8.2 |
+| systemd | Exposes statistics from systemd. | Linux | v2.1.3 |
+| nvme | Exposes nvme statistics from `/proc`. | Linux | v2.2.0 |
## Getting Started
diff --git a/pipeline/inputs/podman-metrics.md b/pipeline/inputs/podman-metrics.md
new file mode 100644
index 000000000..fb51e3328
--- /dev/null
+++ b/pipeline/inputs/podman-metrics.md
@@ -0,0 +1,108 @@
+---
+description: The Podman Metrics input plugin allows you to collect metrics from podman containers, so they can be exposed later as, for example, Prometheus counters and gauges.
+---
+
+# Podman Metrics
+
+## Configuration Parameters
+
+| **Key** | Description | Default |
+| ----------------- | ---------------------------------------------------------- | -------------------------------------------------------------- |
+| scrape_interval | Interval between each scrape of podman data (in seconds) | 30 |
+| scrape_on_start   | Whether to scrape podman data immediately after the plugin starts  | false                                                           |
+| path.config | Custom path to podman containers configuration file | /var/lib/containers/storage/overlay-containers/containers.json |
+| path.sysfs | Custom path to sysfs subsystem directory | /sys/fs/cgroup |
+| path.procfs | Custom path to proc subsystem directory | /proc |
+
+## Getting Started
+
+The podman metrics input plugin allows Fluent Bit to gather Podman container metrics. The entire procedure of collecting the container list and gathering the data associated with it is based on filesystem data. This plugin does not execute Podman commands or send HTTP requests to the Podman API; instead, it reads the Podman configuration file and the metrics exposed by the */sys* and */proc* filesystems.
+
+This plugin supports and automatically detects both cgroups v1 and v2.
+
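+Because everything is read from the filesystem, the default paths can be overridden when the host filesystems are mounted elsewhere, for example inside a container. A hypothetical sketch with host mounts under `/host`:
+
+```
+[INPUT]
+    name        podman_metrics
+    path.config /host/var/lib/containers/storage/overlay-containers/containers.json
+    path.sysfs  /host/sys/fs/cgroup
+    path.procfs /host/proc
+```
+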
+**Example Curl message for one running container**
+
+```
+$> curl 0.0.0.0:2021/metrics
+# HELP fluentbit_input_bytes_total Number of input bytes.
+# TYPE fluentbit_input_bytes_total counter
+fluentbit_input_bytes_total{name="podman_metrics.0"} 0
+# HELP fluentbit_input_records_total Number of input records.
+# TYPE fluentbit_input_records_total counter
+fluentbit_input_records_total{name="podman_metrics.0"} 0
+# HELP container_memory_usage_bytes Container memory usage in bytes
+# TYPE container_memory_usage_bytes counter
+container_memory_usage_bytes{id="858319c39f3f52cd44aa91a520aafb84ded3bc4b4a1e04130ccf87043149bbbf",name="blissful_wescoff",image="docker.io/library/ubuntu:latest"} 884736
+# HELP container_cpu_user_seconds_total Container cpu usage in seconds in user mode
+# TYPE container_cpu_user_seconds_total counter
+container_cpu_user_seconds_total{id="858319c39f3f52cd44aa91a520aafb84ded3bc4b4a1e04130ccf87043149bbbf",name="blissful_wescoff",image="docker.io/library/ubuntu:latest"} 0
+# HELP container_cpu_usage_seconds_total Container cpu usage in seconds
+# TYPE container_cpu_usage_seconds_total counter
+container_cpu_usage_seconds_total{id="858319c39f3f52cd44aa91a520aafb84ded3bc4b4a1e04130ccf87043149bbbf",name="blissful_wescoff",image="docker.io/library/ubuntu:latest"} 0
+# HELP container_network_receive_bytes_total Network received bytes
+# TYPE container_network_receive_bytes_total counter
+container_network_receive_bytes_total{id="858319c39f3f52cd44aa91a520aafb84ded3bc4b4a1e04130ccf87043149bbbf",name="blissful_wescoff",image="docker.io/library/ubuntu:latest",interface="eth0"} 8515
+# HELP container_network_receive_errors_total Network received errors
+# TYPE container_network_receive_errors_total counter
+container_network_receive_errors_total{id="858319c39f3f52cd44aa91a520aafb84ded3bc4b4a1e04130ccf87043149bbbf",name="blissful_wescoff",image="docker.io/library/ubuntu:latest",interface="eth0"} 0
+# HELP container_network_transmit_bytes_total Network transmited bytes
+# TYPE container_network_transmit_bytes_total counter
+container_network_transmit_bytes_total{id="858319c39f3f52cd44aa91a520aafb84ded3bc4b4a1e04130ccf87043149bbbf",name="blissful_wescoff",image="docker.io/library/ubuntu:latest",interface="eth0"} 962
+# HELP container_network_transmit_errors_total Network transmitedd errors
+# TYPE container_network_transmit_errors_total counter
+container_network_transmit_errors_total{id="858319c39f3f52cd44aa91a520aafb84ded3bc4b4a1e04130ccf87043149bbbf",name="blissful_wescoff",image="docker.io/library/ubuntu:latest",interface="eth0"} 0
+# HELP fluentbit_input_storage_overlimit Is the input memory usage overlimit ?.
+# TYPE fluentbit_input_storage_overlimit gauge
+fluentbit_input_storage_overlimit{name="podman_metrics.0"} 0
+# HELP fluentbit_input_storage_memory_bytes Memory bytes used by the chunks.
+# TYPE fluentbit_input_storage_memory_bytes gauge
+fluentbit_input_storage_memory_bytes{name="podman_metrics.0"} 0
+# HELP fluentbit_input_storage_chunks Total number of chunks.
+# TYPE fluentbit_input_storage_chunks gauge
+fluentbit_input_storage_chunks{name="podman_metrics.0"} 0
+# HELP fluentbit_input_storage_chunks_up Total number of chunks up in memory.
+# TYPE fluentbit_input_storage_chunks_up gauge
+fluentbit_input_storage_chunks_up{name="podman_metrics.0"} 0
+# HELP fluentbit_input_storage_chunks_down Total number of chunks down.
+# TYPE fluentbit_input_storage_chunks_down gauge
+fluentbit_input_storage_chunks_down{name="podman_metrics.0"} 0
+# HELP fluentbit_input_storage_chunks_busy Total number of chunks in a busy state.
+# TYPE fluentbit_input_storage_chunks_busy gauge
+fluentbit_input_storage_chunks_busy{name="podman_metrics.0"} 0
+# HELP fluentbit_input_storage_chunks_busy_bytes Total number of bytes used by chunks in a busy state.
+# TYPE fluentbit_input_storage_chunks_busy_bytes gauge
+fluentbit_input_storage_chunks_busy_bytes{name="podman_metrics.0"} 0
+```
+
+### Configuration File
+
+```
+[INPUT]
+ name podman_metrics
+ scrape_interval 10
+ scrape_on_start true
+[OUTPUT]
+ name prometheus_exporter
+```
+
+### Command Line
+
+```
+$> fluent-bit -i podman_metrics -o prometheus_exporter
+```
+
+### Exposed metrics
+
+Currently supported counters are:
+- container_memory_usage_bytes
+- container_memory_max_usage_bytes
+- container_memory_rss
+- container_spec_memory_limit_bytes
+- container_cpu_user_seconds_total
+- container_cpu_usage_seconds_total
+- container_network_receive_bytes_total
+- container_network_receive_errors_total
+- container_network_transmit_bytes_total
+- container_network_transmit_errors_total
+
+> This plugin mimics the naming convention of the Docker metrics exposed by the [cadvisor](https://github.com/google/cadvisor) project.
diff --git a/pipeline/inputs/process-exporter-metrics.md b/pipeline/inputs/process-exporter-metrics.md
new file mode 100644
index 000000000..5c933bce2
--- /dev/null
+++ b/pipeline/inputs/process-exporter-metrics.md
@@ -0,0 +1,103 @@
+---
+description: >-
+  A plugin based on Process Exporter to collect process-level metrics of the
+  system
+---
+
+# Process Exporter Metrics
+
+[Prometheus Node Exporter](https://github.com/prometheus/node_exporter) is a popular way to collect system level metrics from operating systems, such as CPU / Disk / Network / Process statistics.
+Fluent Bit 2.2 onwards includes a process exporter plugin that builds off the Prometheus design to collect process-level metrics without having to manage two separate processes or agents.
+
+The Process Exporter Metrics plugin implements the collection of the various metrics available from [the 3rd party implementation of Prometheus Process Exporter](https://github.com/ncabatoff/process-exporter), and the set will be expanded over time as needed.
+
+**Important note:** All metrics including those collected with this plugin flow through a separate pipeline from logs and current filters do not operate on top of metrics.
+
+This plugin is only supported on Linux-based operating systems, as it uses the `proc` filesystem to access the relevant metrics.
+
+macOS does not have the `proc` filesystem, so this plugin will not work on it.
+
+
+## Configuration
+
+| Key | Description | Default |
+| ------------------------- | -------------------------------------------------------------------------------------- | --------- |
+| scrape\_interval | The rate at which metrics are collected. | 5 seconds |
+| path.procfs               | The mount point used to collect process information and metrics. A read-only mount is sufficient. | /proc/ |
+| process\_include\_pattern | Regex to determine which process names are included in the metrics produced by this plugin (see the sketch below). | Applied to all processes unless explicitly set. Default is `.+`. |
+| process\_exclude\_pattern | Regex to determine which process names are excluded from the metrics produced by this plugin. | Not applied unless explicitly set. Default is `NULL`. |
+| metrics | Specifies which process-level metrics are collected from the host operating system. These metrics depend on the `/proc` filesystem; the actual metric values are read from `/proc` when needed. cpu, io, memory, state, context\_switches, fd, start\_time, thread\_wchan, and thread all depend on procfs. | `cpu,io,memory,state,context_switches,fd,start_time,thread_wchan,thread` |
+
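+For example, a minimal sketch (the pattern and metric selection are illustrative) restricting collection to particular processes and metric groups:
+
+```
+[INPUT]
+    name                    process_exporter_metrics
+    process_include_pattern (fluent-bit|nginx)
+    metrics                 cpu,memory,io
+```
+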
+## Metrics Available
+
+| Name | Description |
+| ----------------- | -------------------------------------------------- |
+| cpu | Exposes CPU statistics from `/proc`. |
+| io | Exposes I/O statistics from `/proc`. |
+| memory | Exposes memory statistics from `/proc`. |
+| state | Exposes process state statistics from `/proc`. |
+| context\_switches | Exposes context\_switches statistics from `/proc`. |
+| fd | Exposes file descriptors statistics from `/proc`. |
+| start\_time | Exposes start\_time statistics from `/proc`. |
+| thread\_wchan | Exposes thread\_wchan from `/proc`. |
+| thread | Exposes thread statistics from `/proc`. |
+
+## Getting Started
+
+### Simple Configuration File
+
+In the following configuration file, the input plugin _process\_exporter\_metrics_ collects metrics every 2 seconds and exposes them through our [Prometheus Exporter](../outputs/prometheus-exporter.md) output plugin on HTTP/TCP port 2021.
+
+```
+# Process Exporter Metrics + Prometheus Exporter
+# -------------------------------------------
+# The following example collect host metrics on Linux and expose
+# them through a Prometheus HTTP end-point.
+#
+# After starting the service try it with:
+#
+# $ curl http://127.0.0.1:2021/metrics
+#
+[SERVICE]
+ flush 1
+ log_level info
+
+[INPUT]
+ name process_exporter_metrics
+ tag process_metrics
+ scrape_interval 2
+
+[OUTPUT]
+ name prometheus_exporter
+ match process_metrics
+ host 0.0.0.0
+ port 2021
+```
+
+You can see the metrics by using _curl_:
+
+```bash
+curl http://127.0.0.1:2021/metrics
+```
+
+### Container to Collect Host Metrics
+
+When deploying Fluent Bit in a container you will need to specify additional settings to ensure that Fluent Bit has access to the process details.
+The following `docker` command deploys Fluent Bit with a specific mount path for `procfs` and settings enabled to ensure that Fluent Bit can collect from the host.
+These are then exposed over port 2021.
+
+```
+docker run -ti -v /proc:/host/proc:ro \
+ -p 2021:2021 \
+ fluent/fluent-bit:2.2 \
+ /fluent-bit/bin/fluent-bit \
+ -i process_exporter_metrics -p path.procfs=/host/proc \
+ -o prometheus_exporter \
+ -f 1
+```
+
+## Enhancement Requests
+
+Development prioritises a subset of the available collectors in the [3rd party implementation of Prometheus Process Exporter](https://github.com/ncabatoff/process-exporter); to request others, please open a GitHub issue by using the following template:\
+\
+\- [in_process_exporter_metrics](https://github.com/fluent/fluent-bit/issues/new?assignees=\&labels=\&template=feature_request.md\&title=in_process_exporter_metrics:%20add%20ABC%20collector)
diff --git a/pipeline/inputs/splunk.md b/pipeline/inputs/splunk.md
new file mode 100644
index 000000000..ae23faebb
--- /dev/null
+++ b/pipeline/inputs/splunk.md
@@ -0,0 +1,56 @@
+# Splunk (HTTP HEC)
+
+The **splunk** input plugin handles [Splunk HTTP HEC](https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) requests.
+
+## Configuration Parameters
+
+| **Key**                  | Description                                                                                                                                     | Default |
+|--------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------|---------|
+| listen | The address to listen on | 0.0.0.0 |
+| port | The port for Fluent Bit to listen on | 9880 |
+| tag_key                  | Specify the key name to overwrite a tag. If set, the tag will be overwritten by the value of the key.                                           |         |
+| buffer_max_size          | Specify the maximum buffer size to receive a JSON message.                                                                                      | 4M      |
+| buffer_chunk_size        | This sets the chunk size for incoming JSON messages. These chunks are then stored and managed in the space made available by buffer_max_size.   | 512K    |
+| successful_response_code | Allows setting the successful response code. `200`, `201` and `204` are supported.                                                              | 201     |
+| splunk\_token            | Add a Splunk token for HTTP HEC.                                                                                                                |         |
+
+## Getting Started
+
+To start receiving Splunk HEC requests, you can run the plugin from the command line or through the configuration file.
+
+### How to set the tag
+
+The tag for the Splunk input plugin is set by adding the tag to the end of the request URL by default.
+This tag is then used to route the event through the system.
+The default behavior of the splunk input sets the tags for the following endpoints:
+
+* `/services/collector`
+* `/services/collector/event`
+* `/services/collector/raw`
+
+Requests to these endpoints are tagged `services_collector`, `services_collector_event`, and `services_collector_raw` respectively.
+
+If you want to use different tags when running multiple instances of the splunk input plugin, specify the `tag` property in each plugin configuration to prevent collisions in the data pipeline.
+
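+For example, a minimal sketch (ports and tag names are chosen for illustration) running two splunk inputs with explicit tags:
+
+```
+[INPUT]
+    name   splunk
+    listen 0.0.0.0
+    port   8088
+    tag    hec_prod
+
+[INPUT]
+    name   splunk
+    listen 0.0.0.0
+    port   8089
+    tag    hec_dev
+
+[OUTPUT]
+    name  stdout
+    match hec_*
+```
+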
+### Command Line
+
+From the command line you can configure Fluent Bit to handle HTTP HEC requests with the following options:
+
+```bash
+$ fluent-bit -i splunk -p port=8088 -o stdout
+```
+
+### Configuration File
+
+In your main configuration file append the following _Input_ & _Output_ sections:
+
+```python
+[INPUT]
+ name splunk
+ listen 0.0.0.0
+ port 8088
+
+[OUTPUT]
+ name stdout
+ match *
+```
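+
+Once running, you can send a test event with `curl` using the standard Splunk HEC request format (the token header is only needed when `splunk_token` is configured; the token value below is a placeholder):
+
+```bash
+$ curl http://127.0.0.1:8088/services/collector/event \
+    -H "Authorization: Splunk <your-token>" \
+    -d '{"event": "Hello, Fluent Bit!"}'
+```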
diff --git a/pipeline/inputs/standard-input.md b/pipeline/inputs/standard-input.md
index bdd11dc40..0a7c51c3a 100644
--- a/pipeline/inputs/standard-input.md
+++ b/pipeline/inputs/standard-input.md
@@ -1,53 +1,184 @@
# Standard Input
-The **stdin** plugin allows to retrieve valid JSON text messages over the standard input interface \(stdin\). In order to use it, specify the plugin name as the input, e.g:
+The **stdin** plugin supports retrieving a message stream from the standard input interface \(stdin\) of the Fluent Bit process.
+In order to use it, specify the plugin name as the input, e.g:
```bash
$ fluent-bit -i stdin -o stdout
```
-As input data the _stdin_ plugin recognize the following JSON data formats:
+If the stdin stream is closed (end-of-file), the stdin plugin will instruct
+Fluent Bit to exit with success (0) after flushing any pending output.
-```bash
-1. { map => val, map => val, map => val }
-2. [ time, { map => val, map => val, map => val } ]
+## Input formats
+
+If no parser is configured for the stdin plugin, it expects *valid JSON* input data in one of the following formats:
+
+1. A JSON object with one or more key-value pairs: `{ "key": "value", "key2": "value2" }`
+2. A 2-element JSON array in [Fluent Bit Event](../../concepts/key-concepts.md#event-or-record) format, which may be:
+ * `[TIMESTAMP, { "key": "value" }]` where TIMESTAMP is a floating point value representing a timestamp in seconds; or
+ * from Fluent Bit v2.1.0, `[[TIMESTAMP, METADATA], { "key": "value" }]` where TIMESTAMP has the same meaning as above and METADATA is a JSON object.
+
+Multi-line input JSON is supported.
+
+Any input data that is *not* in one of the above formats will cause the plugin to log errors like:
+
+```
+[debug] [input:stdin:stdin.0] invalid JSON message, skipping
+[error] [input:stdin:stdin.0] invalid record found, it's not a JSON map or array
```
+To handle inputs in other formats, a parser must be explicitly specified in the configuration for the `stdin` plugin. See [parser input example](#parser-input-example) for sample configuration.
+
+## Log event timestamps
+
+The Fluent Bit event timestamp will be set from the input record if the 2-element event input is used or a custom parser configuration supplies a timestamp. Otherwise the event timestamp will be set to the timestamp at which the record is read by the stdin plugin.
+
+## Examples
+
+### JSON input example
+
A better way to demonstrate how it works is with a _Bash_ script that generates messages and writes them to [Fluent Bit](http://fluentbit.io). Write the following content in a file named _test.sh_:
```bash
-#!/bin/sh
+#!/bin/bash
-while :; do
+for ((i=0; i<=5; i++)); do
echo -n "{\"key\": \"some value\"}"
sleep 1
done
```
-Give the script execution permission:
+Now lets start the script and [Fluent Bit](http://fluentbit.io):
+
+```bash
+$ bash test.sh | fluent-bit -q -i stdin -o stdout
+[0] stdin.0: [[1684196745.942883835, {}], {"key"=>"some value"}]
+[0] stdin.0: [[1684196746.938949056, {}], {"key"=>"some value"}]
+[0] stdin.0: [[1684196747.940162493, {}], {"key"=>"some value"}]
+[0] stdin.0: [[1684196748.941392297, {}], {"key"=>"some value"}]
+[0] stdin.0: [[1684196749.942644238, {}], {"key"=>"some value"}]
+[0] stdin.0: [[1684196750.943721442, {}], {"key"=>"some value"}]
+```
+
+### JSON input with timestamp example
+
+An input event timestamp may also be supplied. Replace `test.sh` with:
+
+```bash
+#!/bin/bash
+
+for ((i=0; i<=5; i++)); do
+ echo -n "
+ [
+ $(date '+%s.%N' -d '1 day ago'),
+ {
+ \"realtimestamp\": $(date '+%s.%N')
+ }
+ ]
+ "
+ sleep 1
+done
+```
+
+Re-run the sample command. Note that the timestamps output by Fluent Bit are now one day old because Fluent Bit used the input message timestamp.
```bash
-$ chmod 755 test.sh
+$ bash test.sh | fluent-bit -q -i stdin -o stdout
+[0] stdin.0: [[1684110480.028171300, {}], {"realtimestamp"=>1684196880.030070}]
+[0] stdin.0: [[1684110481.033753395, {}], {"realtimestamp"=>1684196881.034741}]
+[0] stdin.0: [[1684110482.036730051, {}], {"realtimestamp"=>1684196882.037704}]
+[0] stdin.0: [[1684110483.039903879, {}], {"realtimestamp"=>1684196883.041081}]
+[0] stdin.0: [[1684110484.044719457, {}], {"realtimestamp"=>1684196884.046404}]
+[0] stdin.0: [[1684110485.048710107, {}], {"realtimestamp"=>1684196885.049651}]
```
-Now lets start the script and [Fluent Bit](http://fluentbit.io) in the following way:
+### JSON input with metadata example
+
+Additional metadata is also supported on Fluent Bit v2.1.0 and above by replacing the timestamp
+with a 2-element array of the form `[TIMESTAMP, METADATA]`, e.g.:
```bash
-$ ./test.sh | fluent-bit -i stdin -o stdout
-Fluent Bit v1.x.x
-* Copyright (C) 2019-2020 The Fluent Bit Authors
-* Copyright (C) 2015-2018 Treasure Data
-* Fluent Bit is a CNCF sub-project under the umbrella of Fluentd
-* https://fluentbit.io
+#!/bin/bash
+for ((i=0; i<=5; i++)); do
+ echo -n "
+ [
+ [
+ $(date '+%s.%N' -d '1 day ago'),
+ {\"metakey\": \"metavalue\"}
+ ],
+ {
+ \"realtimestamp\": $(date '+%s.%N')
+ }
+ ]
+ "
+ sleep 1
+done
+```
+
+```
+$ bash ./test.sh | fluent-bit -q -i stdin -o stdout
+[0] stdin.0: [[1684110513.060139417, {"metakey"=>"metavalue"}], {"realtimestamp"=>1684196913.061017}]
+[0] stdin.0: [[1684110514.063085317, {"metakey"=>"metavalue"}], {"realtimestamp"=>1684196914.064145}]
+[0] stdin.0: [[1684110515.066210508, {"metakey"=>"metavalue"}], {"realtimestamp"=>1684196915.067155}]
+[0] stdin.0: [[1684110516.069149971, {"metakey"=>"metavalue"}], {"realtimestamp"=>1684196916.070132}]
+[0] stdin.0: [[1684110517.072484016, {"metakey"=>"metavalue"}], {"realtimestamp"=>1684196917.073636}]
+[0] stdin.0: [[1684110518.075428724, {"metakey"=>"metavalue"}], {"realtimestamp"=>1684196918.076292}]
+```
+
+On older Fluent Bit versions, records in this format will be discarded. Fluent Bit will log:
+
+```
+[ warn] unknown time format 6
+```
+
+if the log level permits.
+
+### Parser input example
-[2016/10/07 21:44:46] [ info] [engine] started
-[0] stdin.0: [1475898286, {"key"=>"some value"}]
-[1] stdin.0: [1475898287, {"key"=>"some value"}]
-[2] stdin.0: [1475898288, {"key"=>"some value"}]
-[3] stdin.0: [1475898289, {"key"=>"some value"}]
-[4] stdin.0: [1475898290, {"key"=>"some value"}]
+To capture inputs in other formats, specify a parser configuration for the
+`stdin` plugin.
+
+For example, if you want to read raw messages line-by-line and forward them, you
+could use a `parser.conf` that captures the whole message line:
+
+```
+[PARSER]
+    name     stringify_message
+    format   regex
+    key_name message
+    regex    ^(?<message>.*)
+```
+
+then use that in the `parser` clause of the stdin plugin in the `fluent-bit.conf`:
+
+```
+[INPUT]
+ Name stdin
+ Tag stdin
+ Parser stringify_message
+
+[OUTPUT]
+ Name stdout
+ Match *
```
+Fluent Bit will now read each line and emit a single message for each input
+line:
+
+```
+$ seq 1 5 | /opt/fluent-bit/bin/fluent-bit -c fluent-bit.conf -R parser.conf -q
+[0] stdin: [1681358780.517029169, {"message"=>"1"}]
+[1] stdin: [1681358780.517068334, {"message"=>"2"}]
+[2] stdin: [1681358780.517072116, {"message"=>"3"}]
+[3] stdin: [1681358780.517074758, {"message"=>"4"}]
+[4] stdin: [1681358780.517077392, {"message"=>"5"}]
+$
+```
+
+In real-world deployments it is best to use a more realistic parser that splits
+messages into real fields and adds appropriate tags.
+
## Configuration Parameters
The plugin supports the following configuration parameters:
@@ -55,4 +186,4 @@ The plugin supports the following configuration parameters:
| Key | Description | Default |
| :--- | :--- | :--- |
| Buffer\_Size | Set the buffer size to read data. This value is used to increase buffer size. The value must be according to the [Unit Size](../../administration/configuring-fluent-bit/unit-sizes.md) specification. | 16k |
-
+| Parser | The name of the parser to invoke instead of the default JSON input parser. | |
diff --git a/pipeline/inputs/syslog.md b/pipeline/inputs/syslog.md
index 29f0a33f8..1fe97fe67 100644
--- a/pipeline/inputs/syslog.md
+++ b/pipeline/inputs/syslog.md
@@ -17,6 +17,7 @@ The plugin supports the following configuration parameters:
| Buffer\_Chunk\_Size | By default, the buffer to store incoming Syslog messages does not allocate the maximum memory allowed; instead, it allocates memory as required. The rounds of allocation are set by _Buffer\_Chunk\_Size_. If not set, _Buffer\_Chunk\_Size_ is equal to 32000 bytes \(32KB\). Read the considerations below when using _udp_ or _unix\_udp_ mode. | |
| Buffer\_Max\_Size | Specify the maximum buffer size to receive a Syslog message. If not set, the default size will be the value of _Buffer\_Chunk\_Size_. | |
| Receive\_Buffer\_Size | Specify the maximum socket receive buffer size. If not set, the default value is OS-dependant, but generally too low to accept thousands of syslog messages per second without loss on _udp_ or _unix\_udp_ sockets. Note that on Linux the value is capped by `sysctl net.core.rmem_max`.| |
+| Source\_Address\_Key| Specify the key where the source address will be injected (see the sketch below). | |
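+
+A minimal sketch of the new option (the key name `source_host` and the port are chosen for illustration):
+
+```
+[INPUT]
+    name               syslog
+    mode               udp
+    listen             0.0.0.0
+    port               5140
+    source_address_key source_host
+```
+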
### Considerations
diff --git a/pipeline/inputs/tail.md b/pipeline/inputs/tail.md
index 4213492b5..fec3c2b47 100644
--- a/pipeline/inputs/tail.md
+++ b/pipeline/inputs/tail.md
@@ -19,7 +19,7 @@ The plugin supports the following configuration parameters:
| Read\_from\_Head | For new discovered files on start \(without a database offset/position\), read the content from the head of the file, not tail. | False |
| Refresh\_Interval | The interval of refreshing the list of watched files in seconds. | 60 |
| Rotate\_Wait | Specify the number of extra time in seconds to monitor a file once is rotated in case some pending data is flushed. | 5 |
-| Ignore\_Older | Ignores files which modification date is older than this time in seconds. Supports m,h,d \(minutes, hours, days\) syntax. | |
+| Ignore\_Older | Ignores files older than `ignore_older`. Supports m, h, d (minutes, hours, days) syntax. Default behavior is to read all. | |
| Skip\_Long\_Lines | When a monitored file reaches its buffer capacity due to a very long line \(Buffer\_Max\_Size\), the default behavior is to stop monitoring that file. Skip\_Long\_Lines alter that behavior and instruct Fluent Bit to skip long lines and continue processing other lines that fits into the buffer size. | Off |
| Skip\_Empty\_Lines | Skips empty lines in the log file from any further processing or output. | Off |
| DB | Specify the database file to keep track of monitored files and offsets. | |
@@ -35,6 +35,7 @@ The plugin supports the following configuration parameters:
| Tag\_Regex | Set a regex to extract fields from the file name. E.g. `(?<pod_name>[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)_(?<namespace>[^_]+)_(?<container_name>.+)-` | |
| Static\_Batch\_Size | Set the maximum number of bytes to process per iteration for the monitored static files (files that already exists upon Fluent Bit start). | 50M |
+
Note that if the database parameter `DB` is **not** specified, by default the plugin will start reading each target file from the beginning. This might also cause some unwanted behavior; for example, when a line is bigger than `Buffer_Chunk_Size` and `Skip_Long_Lines` is not turned on, the file will be read from the beginning at each `Refresh_Interval` until the file is rotated.
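+
+For example, a minimal sketch (paths are illustrative) that uses a database file so that read offsets survive restarts:
+
+```
+[INPUT]
+    name tail
+    path /var/log/syslog
+    db   /var/log/flb_tail.db
+```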
## Multiline Support
@@ -125,7 +126,7 @@ $ fluent-bit -i tail -p path=/var/log/syslog -o stdout
### Configuration File
-In your main configuration file append the following _Input_ & _Output_ sections. An example visualization can be found [here](https://link.calyptia.com/vg2)
+In your main configuration file append the following _Input_ & _Output_ sections.
{% tabs %}
{% tab title="fluent-bit.conf" %}
@@ -177,7 +178,7 @@ In the case above we can use the following parser, that extracts the Time as `ti
[PARSER]
Name multiline
Format regex
- Regex /(?