diff --git a/Cargo.lock b/Cargo.lock
index bd9d31d..45746af 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -282,6 +282,15 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2"

+[[package]]
+name = "convert_case"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca"
+dependencies = [
+ "unicode-segmentation",
+]
+
[[package]]
name = "cpufeatures"
version = "0.2.12"
@@ -414,6 +423,7 @@ version = "0.0.0"
dependencies = [
 "anyhow",
 "cc",
+ "convert_case",
 "once_cell",
 "ropey",
 "thiserror",
@@ -569,6 +579,10 @@ version = "1.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9"

+[[package]]
+name = "ide-completion"
+version = "0.1.0"
+
[[package]]
name = "idna"
version = "0.5.0"
@@ -1281,6 +1295,12 @@ dependencies = [
 "tinyvec",
]

+[[package]]
+name = "unicode-segmentation"
+version = "1.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202"
+
[[package]]
name = "url"
version = "2.5.2"
diff --git a/Cargo.toml b/Cargo.toml
index 9b9d2d0..899ef64 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
[workspace]
members = [
-    "fluent-bit-language-server",
+    "fluent-bit-language-server", "ide-completion",
    "xtask",
]
resolver = "2"
diff --git a/fluent-bit-language-server/Cargo.toml b/fluent-bit-language-server/Cargo.toml
index 1fb7693..125ebf0 100644
--- a/fluent-bit-language-server/Cargo.toml
+++ b/fluent-bit-language-server/Cargo.toml
@@ -20,6 +20,7 @@ tokio = { version = "1.38.0", features = ["full"] }
once_cell = "1.19.0"
thiserror = "1.0.63"
ropey = "1.6.1"
+convert_case = "0.6.0"

[build-dependencies]
diff --git a/fluent-bit-language-server/src/assets/docs/input/network-io-metrics.md b/fluent-bit-language-server/src/assets/docs/input/network-io-metrics.md
new file mode 100644
index 0000000..6efc35b
--- /dev/null
+++ b/fluent-bit-language-server/src/assets/docs/input/network-io-metrics.md
@@ -0,0 +1,59 @@
# Network I/O Metrics

The **netif** input plugin gathers network traffic information from the running system at a configurable interval and reports it.

The Network I/O Metrics plugin creates metrics that are log-based \(i.e. a JSON payload\). If you are looking for Prometheus-based metrics, please see the Node Exporter Metrics input plugin.

## Configuration Parameters

The plugin supports the following configuration parameters:

| Key | Description | Default |
| :--- | :--- | :--- |
| Interface | Specify the network interface to monitor. e.g. eth0 | |
| Interval\_Sec | Polling interval \(seconds\). | 1 |
| Interval\_NSec | Polling interval \(nanoseconds\). | 0 |
| Verbose | If true, gather metrics precisely. | false |
| Test\_At\_Init | If true, test whether the network interface is valid at initialization. | false |

## Getting Started

In order to monitor network traffic from your system, you can run the plugin from the command line or through the configuration file:

### Command Line

```bash
$ bin/fluent-bit -i netif -p interface=eth0 -o stdout
Fluent Bit v1.x.x
* Copyright (C) 2019-2020 The Fluent Bit Authors
* Copyright (C) 2015-2018 Treasure Data
* Fluent Bit is a CNCF sub-project under the umbrella of Fluentd
* https://fluentbit.io

[2017/07/08 23:34:18] [ info] [engine] started
[0] netif.0: [1499524459.001698260, {"eth0.rx.bytes"=>89769869, "eth0.rx.packets"=>73357, "eth0.rx.errors"=>0, "eth0.tx.bytes"=>4256474, "eth0.tx.packets"=>24293, "eth0.tx.errors"=>0}]
[1] netif.0: [1499524460.002541885, {"eth0.rx.bytes"=>98, "eth0.rx.packets"=>1, "eth0.rx.errors"=>0, "eth0.tx.bytes"=>98, "eth0.tx.packets"=>1, "eth0.tx.errors"=>0}]
[2] netif.0: [1499524461.001142161, {"eth0.rx.bytes"=>98, "eth0.rx.packets"=>1, "eth0.rx.errors"=>0, "eth0.tx.bytes"=>98, "eth0.tx.packets"=>1, "eth0.tx.errors"=>0}]
[3] netif.0: [1499524462.002612971, {"eth0.rx.bytes"=>98, "eth0.rx.packets"=>1, "eth0.rx.errors"=>0, "eth0.tx.bytes"=>98, "eth0.tx.packets"=>1, "eth0.tx.errors"=>0}]
```

### Configuration File

In your main configuration file append the following _Input_ & _Output_ sections:

```ini
[INPUT]
    Name          netif
    Tag           netif
    Interval_Sec  1
    Interval_NSec 0
    Interface     eth0
[OUTPUT]
    Name          stdout
    Match         *
```

Note: Total interval \(sec\) = Interval\_Sec + \(Interval\_NSec / 1000000000\).

e.g. 1.5s = 1s + 500000000ns

diff --git a/fluent-bit-language-server/src/assets/docs/input/nginx.md b/fluent-bit-language-server/src/assets/docs/input/nginx.md
new file mode 100644
index 0000000..d56fc37
--- /dev/null
+++ b/fluent-bit-language-server/src/assets/docs/input/nginx.md
@@ -0,0 +1,241 @@
# NGINX Exporter Metrics

The _NGINX Exporter Metrics_ input plugin scrapes metrics from the NGINX stub status handler.

## Configuration Parameters

The plugin supports the following configuration parameters:

| Key | Description | Default |
| :--- | :--- | :--- |
| Host | Name of the target host or IP address to check. | localhost |
| Port | Port of the target nginx service to connect to. | 80 |
| Status_URL | The URL of the Stub Status Handler. | /status |
| Nginx_Plus | Turn on NGINX Plus mode. | true |

## Getting Started

NGINX must be configured with a location that invokes the stub status handler. Here is an example configuration with such a location:

```
server {
    listen       80;
    listen  [::]:80;
    server_name  localhost;
    location / {
        root   /usr/share/nginx/html;
        index  index.html index.htm;
    }
    # configure the stub status handler.
    location /status {
        stub_status;
    }
}
```

### Configuration with NGINX Plus REST API

A much more powerful and flexible metrics API is available with NGINX Plus. A path needs to be configured in NGINX Plus first.
+ +``` +server { + listen 80; + listen [::]:80; + server_name localhost; + + # enable /api/ location with appropriate access control in order + # to make use of NGINX Plus API + # + location /api/ { + api write=on; + # configure to allow requests from the server running fluent-bit + allow 192.168.1.*; + deny all; + } +} +``` + +### Command Line + +From the command line you can let Fluent Bit generate the checks with the following options: + +```bash +$ fluent-bit -i nginx_metrics -p host=127.0.0.1 -p port=80 -p status_url=/status -p nginx_plus=off -o stdout +``` + +To gather metrics from the command line with the NGINX Plus REST API we need to turn on the +nginx_plus property, like so: + +```bash +$ fluent-bit -i nginx_metrics -p host=127.0.0.1 -p port=80 -p nginx_plus=on -p status_url=/api -o stdout +``` + + +### Configuration File + +In your main configuration file append the following _Input_ & _Output_ sections: + +```ini +[INPUT] + Name nginx_metrics + Host 127.0.0.1 + Port 80 + Status_URL /status + Nginx_Plus off + +[OUTPUT] + Name stdout + Match * +``` + +And for NGINX Plus API: + +```ini +[INPUT] + Name nginx_metrics + Nginx_Plus on + Host 127.0.0.1 + Port 80 + Status_URL /api + +[OUTPUT] + Name stdout + Match * +``` + + + +## Testing + +You can quickly test against the NGINX server running on localhost by invoking it directly from the command line: + +```bash +$ fluent-bit -i nginx_metrics -p host=127.0.0.1 -p nginx_plus=off -o stdout -p match=* -f 1 +Fluent Bit v2.x.x +* Copyright (C) 2019-2020 The Fluent Bit Authors +* Copyright (C) 2015-2018 Treasure Data +* Fluent Bit is a CNCF sub-project under the umbrella of Fluentd +* https://fluentbit.io + +2021-10-14T19:37:37.228691854Z nginx_connections_accepted = 788253884 +2021-10-14T19:37:37.228691854Z nginx_connections_handled = 788253884 +2021-10-14T19:37:37.228691854Z nginx_http_requests_total = 42045501 +2021-10-14T19:37:37.228691854Z nginx_connections_active = 2009 +2021-10-14T19:37:37.228691854Z nginx_connections_reading = 0 +2021-10-14T19:37:37.228691854Z nginx_connections_writing = 1 +2021-10-14T19:37:37.228691854Z nginx_connections_waiting = 2008 +2021-10-14T19:37:35.229919621Z nginx_up = 1 +``` + +## Exported Metrics + +This documentation is copied from the nginx prometheus exporter metrics documentation: +[https://github.com/nginxinc/nginx-prometheus-exporter/blob/master/README.md]. + +### Common metrics: +Name | Type | Description | Labels +----|----|----|----| +`nginx_up` | Gauge | Shows the status of the last metric scrape: `1` for a successful scrape and `0` for a failed one | [] | + +### Metrics for NGINX OSS: +#### [Stub status metrics](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html) +Name | Type | Description | Labels +----|----|----|----| +`nginx_connections_accepted` | Counter | Accepted client connections. | [] | +`nginx_connections_active` | Gauge | Active client connections. | [] | +`nginx_connections_handled` | Counter | Handled client connections. | [] | +`nginx_connections_reading` | Gauge | Connections where NGINX is reading the request header. | [] | +`nginx_connections_waiting` | Gauge | Idle client connections. | [] | +`nginx_connections_writing` | Gauge | Connections where NGINX is writing the response back to the client. | [] | +`nginx_http_requests_total` | Counter | Total http requests. 
| [] | + +### Metrics for NGINX Plus: +#### [Connections](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_connections) +Name | Type | Description | Labels +----|----|----|----| +`nginxplus_connections_accepted` | Counter | Accepted client connections | [] | +`nginxplus_connections_active` | Gauge | Active client connections | [] | +`nginxplus_connections_dropped` | Counter | Dropped client connections dropped | [] | +`nginxplus_connections_idle` | Gauge | Idle client connections | [] | + +#### [HTTP](https://nginx.org/en/docs/http/ngx_http_api_module.html#http_) +Name | Type | Description | Labels +----|----|----|----| +`nginxplus_http_requests_total` | Counter | Total http requests | [] | +`nginxplus_http_requests_current` | Gauge | Current http requests | [] | + +#### [SSL](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_ssl_object) +Name | Type | Description | Labels +----|----|----|----| +`nginxplus_ssl_handshakes` | Counter | Successful SSL handshakes | [] | +`nginxplus_ssl_handshakes_failed` | Counter | Failed SSL handshakes | [] | +`nginxplus_ssl_session_reuses` | Counter | Session reuses during SSL handshake | [] | + +#### [HTTP Server Zones](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_server_zone) +Name | Type | Description | Labels +----|----|----|----| +`nginxplus_server_zone_processing` | Gauge | Client requests that are currently being processed | `server_zone` | +`nginxplus_server_zone_requests` | Counter | Total client requests | `server_zone` | +`nginxplus_server_zone_responses` | Counter | Total responses sent to clients | `code` (the response status code. The values are: `1xx`, `2xx`, `3xx`, `4xx` and `5xx`), `server_zone` | +`nginxplus_server_zone_discarded` | Counter | Requests completed without sending a response | `server_zone` | +`nginxplus_server_zone_received` | Counter | Bytes received from clients | `server_zone` | +`nginxplus_server_zone_sent` | Counter | Bytes sent to clients | `server_zone` | + +#### [Stream Server Zones](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_stream_server_zone) +Name | Type | Description | Labels +----|----|----|----| +`nginxplus_stream_server_zone_processing` | Gauge | Client connections that are currently being processed | `server_zone` | +`nginxplus_stream_server_zone_connections` | Counter | Total connections | `server_zone` | +`nginxplus_stream_server_zone_sessions` | Counter | Total sessions completed | `code` (the response status code. The values are: `2xx`, `4xx`, and `5xx`), `server_zone` | +`nginxplus_stream_server_zone_discarded` | Counter | Connections completed without creating a session | `server_zone` | +`nginxplus_stream_server_zone_received` | Counter | Bytes received from clients | `server_zone` | +`nginxplus_stream_server_zone_sent` | Counter | Bytes sent to clients | `server_zone` | + +#### [HTTP Upstreams](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_upstream) + +> Note: for the `state` metric, the string values are converted to float64 using the following rule: `"up"` -> `1.0`, `"draining"` -> `2.0`, `"down"` -> `3.0`, `"unavail"` –> `4.0`, `"checking"` –> `5.0`, `"unhealthy"` -> `6.0`. 
Name | Type | Description | Labels
----|----|----|----|
`nginxplus_upstream_server_state` | Gauge | Current state | `server`, `upstream` |
`nginxplus_upstream_server_active` | Gauge | Active connections | `server`, `upstream` |
`nginxplus_upstream_server_limit` | Gauge | Limit for connections which corresponds to the max_conns parameter of the upstream server. Zero value means there is no limit | `server`, `upstream` |
`nginxplus_upstream_server_requests` | Counter | Total client requests | `server`, `upstream` |
`nginxplus_upstream_server_responses` | Counter | Total responses sent to clients | `code` (the response status code. The values are: `1xx`, `2xx`, `3xx`, `4xx` and `5xx`), `server`, `upstream` |
`nginxplus_upstream_server_sent` | Counter | Bytes sent to this server | `server`, `upstream` |
`nginxplus_upstream_server_received` | Counter | Bytes received from this server | `server`, `upstream` |
`nginxplus_upstream_server_fails` | Counter | Number of unsuccessful attempts to communicate with the server | `server`, `upstream` |
`nginxplus_upstream_server_unavail` | Counter | How many times the server became unavailable for client requests (state 'unavail') due to the number of unsuccessful attempts reaching the max_fails threshold | `server`, `upstream` |
`nginxplus_upstream_server_header_time` | Gauge | Average time to get the response header from the server | `server`, `upstream` |
`nginxplus_upstream_server_response_time` | Gauge | Average time to get the full response from the server | `server`, `upstream` |
`nginxplus_upstream_keepalives` | Gauge | Idle keepalive connections | `upstream` |
`nginxplus_upstream_zombies` | Gauge | Servers removed from the group but still processing active client requests | `upstream` |

#### [Stream Upstreams](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_stream_upstream)

> Note: for the `state` metric, the string values are converted to float64 using the following rule: `"up"` -> `1.0`, `"down"` -> `3.0`, `"unavail"` -> `4.0`, `"checking"` -> `5.0`, `"unhealthy"` -> `6.0`.

Name | Type | Description | Labels
----|----|----|----|
`nginxplus_stream_upstream_server_state` | Gauge | Current state | `server`, `upstream` |
`nginxplus_stream_upstream_server_active` | Gauge | Active connections | `server`, `upstream` |
`nginxplus_stream_upstream_server_limit` | Gauge | Limit for connections which corresponds to the max_conns parameter of the upstream server. Zero value means there is no limit | `server`, `upstream` |
`nginxplus_stream_upstream_server_connections` | Counter | Total number of client connections forwarded to this server | `server`, `upstream` |
`nginxplus_stream_upstream_server_connect_time` | Gauge | Average time to connect to the upstream server | `server`, `upstream` |
`nginxplus_stream_upstream_server_first_byte_time` | Gauge | Average time to receive the first byte of data | `server`, `upstream` |
`nginxplus_stream_upstream_server_response_time` | Gauge | Average time to receive the last byte of data | `server`, `upstream` |
`nginxplus_stream_upstream_server_sent` | Counter | Bytes sent to this server | `server`, `upstream` |
`nginxplus_stream_upstream_server_received` | Counter | Bytes received from this server | `server`, `upstream` |
`nginxplus_stream_upstream_server_fails` | Counter | Number of unsuccessful attempts to communicate with the server | `server`, `upstream` |
`nginxplus_stream_upstream_server_unavail` | Counter | How many times the server became unavailable for client connections (state 'unavail') due to the number of unsuccessful attempts reaching the max_fails threshold | `server`, `upstream` |
`nginxplus_stream_upstream_zombies` | Gauge | Servers removed from the group but still processing active client connections | `upstream` |

#### [Location Zones](https://nginx.org/en/docs/http/ngx_http_api_module.html#def_nginx_http_location_zone)
Name | Type | Description | Labels
----|----|----|----|
`nginxplus_location_zone_requests` | Counter | Total client requests | `location_zone` |
`nginxplus_location_zone_responses` | Counter | Total responses sent to clients | `code` (the response status code. The values are: `1xx`, `2xx`, `3xx`, `4xx` and `5xx`), `location_zone` |
`nginxplus_location_zone_discarded` | Counter | Requests completed without sending a response | `location_zone` |
`nginxplus_location_zone_received` | Counter | Bytes received from clients | `location_zone` |
`nginxplus_location_zone_sent` | Counter | Bytes sent to clients | `location_zone` |
diff --git a/fluent-bit-language-server/src/assets/docs/input/node-exporter-metrics.md b/fluent-bit-language-server/src/assets/docs/input/node-exporter-metrics.md
new file mode 100644
index 0000000..097f1ac
--- /dev/null
+++ b/fluent-bit-language-server/src/assets/docs/input/node-exporter-metrics.md
@@ -0,0 +1,204 @@
---
description: >-
  A plugin based on Prometheus Node Exporter to collect system / host level
  metrics
---

# Node Exporter Metrics

[Prometheus Node Exporter](https://github.com/prometheus/node_exporter) is a popular way to collect system level metrics from operating systems, such as CPU / Disk / Network / Process statistics. Fluent Bit 1.8.0 includes the node exporter metrics plugin, which builds off the Prometheus design to collect system level metrics without having to manage two separate processes or agents.

The initial release of Node Exporter Metrics contains a subset of the collectors and metrics available from Prometheus Node Exporter, and we plan to expand them over time.

**Important note:** Metrics collected with Node Exporter Metrics flow through a separate pipeline from logs, and current filters do not operate on top of metrics.

This plugin is supported on Linux-based operating systems for the most part, with macOS offering a reduced subset of metrics. The table below indicates which collectors are supported on macOS.
## Configuration

| Key | Description | Default |
| --------------- | ---------------------------------------------------------------------- | --------- |
| scrape_interval | The rate at which metrics are collected from the host operating system | 5 seconds |
| path.procfs | The mount point used to collect process information and metrics | /proc/ |
| path.sysfs | The path in the filesystem used to collect system metrics | /sys/ |
| collector.cpu.scrape\_interval | The rate in seconds at which cpu metrics are collected from the host operating system. A value greater than 0 overrides the global default. | 0 seconds |
| collector.cpufreq.scrape\_interval | The rate in seconds at which cpufreq metrics are collected from the host operating system. A value greater than 0 overrides the global default. | 0 seconds |
| collector.meminfo.scrape\_interval | The rate in seconds at which meminfo metrics are collected from the host operating system. A value greater than 0 overrides the global default. | 0 seconds |
| collector.diskstats.scrape\_interval | The rate in seconds at which diskstats metrics are collected from the host operating system. A value greater than 0 overrides the global default. | 0 seconds |
| collector.filesystem.scrape\_interval | The rate in seconds at which filesystem metrics are collected from the host operating system. A value greater than 0 overrides the global default. | 0 seconds |
| collector.uname.scrape\_interval | The rate in seconds at which uname metrics are collected from the host operating system. A value greater than 0 overrides the global default. | 0 seconds |
| collector.stat.scrape\_interval | The rate in seconds at which stat metrics are collected from the host operating system. A value greater than 0 overrides the global default. | 0 seconds |
| collector.time.scrape\_interval | The rate in seconds at which time metrics are collected from the host operating system. A value greater than 0 overrides the global default. | 0 seconds |
| collector.loadavg.scrape\_interval | The rate in seconds at which loadavg metrics are collected from the host operating system. A value greater than 0 overrides the global default. | 0 seconds |
| collector.vmstat.scrape\_interval | The rate in seconds at which vmstat metrics are collected from the host operating system. A value greater than 0 overrides the global default. | 0 seconds |
| collector.thermal_zone.scrape\_interval | The rate in seconds at which thermal_zone metrics are collected from the host operating system. A value greater than 0 overrides the global default. | 0 seconds |
| collector.filefd.scrape\_interval | The rate in seconds at which filefd metrics are collected from the host operating system. A value greater than 0 overrides the global default. | 0 seconds |
| collector.nvme.scrape\_interval | The rate in seconds at which nvme metrics are collected from the host operating system. A value greater than 0 overrides the global default. | 0 seconds |
| collector.processes.scrape\_interval | The rate in seconds at which system-level process metrics are collected from the host operating system. A value greater than 0 overrides the global default. | 0 seconds |
| metrics | Specifies which metrics are collected from the host operating system. These metrics depend on `/proc` or `/sys` fs; the actual values are read from `/proc` or `/sys` when needed. cpu, cpufreq, meminfo, diskstats, filesystem, stat, loadavg, vmstat, netdev, and filefd depend on procfs; cpufreq metrics depend on sysfs. | `"cpu,cpufreq,meminfo,diskstats,filesystem,uname,stat,time,loadavg,vmstat,netdev,filefd"` |
| filesystem.ignore\_mount\_point\_regex | Specify the regex of mount points to ignore (skip collection). | `^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+|var/lib/containers/storage/.+)($|/)` |
| filesystem.ignore\_filesystem\_type\_regex | Specify the regex of filesystem types to ignore (skip collection). | `^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$` |
| diskstats.ignore\_device\_regex | Specify the regex of devices to ignore (skip collection). | `^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\\d+n\\d+p)\\d+$` |
| systemd_service_restart_metrics | Determines if the collector will include service restart metrics | false |
| systemd_unit_start_time_metrics | Determines if the collector will include unit start time metrics | false |
| systemd_include_service_task_metrics | Determines if the collector will include service task metrics | false |
| systemd_include_pattern | Regex to determine which units are included in the metrics produced by the systemd collector | Not applied unless explicitly set |
| systemd_exclude_pattern | Regex to determine which units are excluded from the metrics produced by the systemd collector | `.+\\.(automount|device|mount|scope|slice)` |


**Note:** The plugin's top-level `scrape_interval` setting is the global default. Each `collector.xxx.scrape_interval` option overrides the interval for that specific collector only, together with the set of metrics it provides.

An overridden interval only changes the collection interval, not the interval for publishing the metrics, which is taken from the global setting. For example, if the global interval is set to 5s and an override interval of 60s is used, the published metrics will still be reported every 5s, but for that specific collector they will stay the same for 60s until it is collected again. This feature aims to help with down-sampling when collecting metrics.


## Collectors available

The following table describes the collectors available as part of this plugin. All of them are enabled by default and respect the original metric names, descriptions, and types from Prometheus Node Exporter, so you can use your current dashboards without any compatibility problem.
> Note: the Version column specifies the Fluent Bit version where the collector is available.

| Name | Description | OS | Version |
| ----------------- | ------------------------------------------------------------------------------------------------ | ----------- | ------- |
| cpu | Exposes CPU statistics. | Linux,macOS | v1.8 |
| cpufreq | Exposes CPU frequency statistics. | Linux | v1.8 |
| diskstats | Exposes disk I/O statistics. | Linux,macOS | v1.8 |
| filefd | Exposes file descriptor statistics from `/proc/sys/fs/file-nr`. | Linux | v1.8.2 |
| filesystem | Exposes filesystem statistics from `/proc/*/mounts`. | Linux | v2.0.9 |
| loadavg | Exposes load average. | Linux,macOS | v1.8 |
| meminfo | Exposes memory statistics. | Linux,macOS | v1.8 |
| netdev | Exposes network interface statistics such as bytes transferred. | Linux,macOS | v1.8.2 |
| stat | Exposes various statistics from `/proc/stat`. This includes boot time, forks, and interrupts. | Linux | v1.8 |
| time | Exposes the current system time. | Linux | v1.8 |
| uname | Exposes system information as provided by the uname system call. | Linux,macOS | v1.8 |
| vmstat | Exposes statistics from `/proc/vmstat`. | Linux | v1.8.2 |
| systemd collector | Exposes statistics from systemd. | Linux | v2.1.3 |
| thermal_zone | Exposes thermal statistics from `/sys/class/thermal/thermal_zone/*`. | Linux | v2.2.1 |
| nvme | Exposes nvme statistics from `/proc`. | Linux | v2.2.0 |
| processes | Exposes processes statistics from `/proc`. | Linux | v2.2.0 |

## Getting Started

### Simple Configuration File

In the following configuration file, the _node_exporter_metrics_ input plugin collects metrics every 2 seconds and exposes them through our [Prometheus Exporter](../outputs/prometheus-exporter.md) output plugin on HTTP/TCP port 2021.

{% tabs %}
{% tab title="fluent-bit.conf" %}
```
# Node Exporter Metrics + Prometheus Exporter
# -------------------------------------------
# The following example collects host metrics on Linux and exposes
# them through a Prometheus HTTP end-point.
#
# After starting the service try it with:
#
# $ curl http://127.0.0.1:2021/metrics
#
[SERVICE]
    flush           1
    log_level       info

[INPUT]
    name            node_exporter_metrics
    tag             node_metrics
    scrape_interval 2

[OUTPUT]
    name            prometheus_exporter
    match           node_metrics
    host            0.0.0.0
    port            2021
```
{% endtab %}

{% tab title="fluent-bit.yaml" %}
```yaml
# Node Exporter Metrics + Prometheus Exporter
# -------------------------------------------
# The following example collects host metrics on Linux and exposes
# them through a Prometheus HTTP end-point.
#
# After starting the service try it with:
#
# $ curl http://127.0.0.1:2021/metrics
#
service:
  flush: 1
  log_level: info
pipeline:
  inputs:
    - name: node_exporter_metrics
      tag: node_metrics
      scrape_interval: 2
  outputs:
    - name: prometheus_exporter
      match: node_metrics
      host: 0.0.0.0
      port: 2021
```
{% endtab %}
{% endtabs %}

You can test that the metrics are exposed by using _curl_:

```bash
curl http://127.0.0.1:2021/metrics
```

### Container to Collect Host Metrics

When deploying Fluent Bit in a container you will need to specify additional settings to ensure that Fluent Bit has access to the host operating system. The following docker command deploys Fluent Bit with specific mount paths and settings enabled to ensure that Fluent Bit can collect from the host. These are then exposed over port 2021.
+ +``` +docker run -ti -v /proc:/host/proc \ + -v /sys:/host/sys \ + -p 2021:2021 \ + fluent/fluent-bit:1.8.0 \ + /fluent-bit/bin/fluent-bit \ + -i node_exporter_metrics -p path.procfs=/host/proc -p path.sysfs=/host/sys \ + -o prometheus_exporter -p "add_label=host $HOSTNAME" \ + -f 1 +``` + +### Fluent Bit + Prometheus + Grafana + +If you like dashboards for monitoring, Grafana is one of the preferred options. In our Fluent Bit source code repository, we have pushed a simple **docker-compose **example. Steps: + +#### Get a copy of Fluent Bit source code + +```bash +git clone https://github.com/fluent/fluent-bit +cd fluent-bit/docker_compose/node-exporter-dashboard/ +``` + +#### Start the service and view your Dashboard + +``` +docker-compose up --force-recreate -d --build +``` + +Now open your browser in the address **http://127.0.0.1:3000**. When asked for the credentials to access Grafana, just use the **admin **username and **admin **password**.** + +![](../../.gitbook/assets/updated.png) + +Note that by default Grafana dashboard plots the data from the last 24 hours, so just change it to **Last 5 minutes** to see the recent data being collected. + +#### Stop the Service + +```bash +docker-compose down +``` + +## Enhancement Requests + +Our current plugin implements a sub-set of the available collectors in the original Prometheus Node Exporter, if you would like that we prioritize a specific collector please open a Github issue by using the following template:\ +\ +\- [in_node_exporter_metrics](https://github.com/fluent/fluent-bit/issues/new?assignees=\&labels=\&template=feature_request.md\&title=in_node_exporter_metrics:%20add%20ABC%20collector) + diff --git a/fluent-bit-language-server/src/completion.rs b/fluent-bit-language-server/src/completion.rs index d87ef5e..30f6b31 100644 --- a/fluent-bit-language-server/src/completion.rs +++ b/fluent-bit-language-server/src/completion.rs @@ -1,5 +1,6 @@ use std::{collections::HashMap, fmt::Display, str::FromStr, string::ToString}; +use convert_case::{Case, Casing}; use once_cell::sync::Lazy; use tower_lsp::lsp_types::{ CompletionItem, CompletionItemKind, CompletionItemLabelDetails, Documentation, @@ -97,9 +98,18 @@ impl FlbConfigParameter { #[derive(Clone)] pub(crate) struct FlbCompletionSnippet { + /// Completion Label which will be printed in the completion list + /// + /// e.g. "Network I/O Metrics" label: String, + + /// Plugin name which will be used in the configuration file + /// + /// e.g. `netif` + plugin_name: String, documentation_markdown: String, config_params: Vec, + // XXX: maybe no need // detail: Option, // label_details: Option, // label_details_desc: Option, @@ -108,11 +118,13 @@ pub(crate) struct FlbCompletionSnippet { impl FlbCompletionSnippet { pub fn new( label: &str, + plugin_name: Option<&str>, documentation_markdown: &str, config_params: Vec, ) -> Self { FlbCompletionSnippet { label: label.to_string(), + plugin_name: plugin_name.map_or_else(|| label.to_case(Case::Snake), |s| s.to_string()), documentation_markdown: documentation_markdown.to_string(), config_params, } @@ -121,7 +133,7 @@ impl FlbCompletionSnippet { pub fn props_to_insert_text(&self) -> String { const KEY_WIDTH: usize = 15; // TODO: dynamic? - let mut ret = format!("{:KEY_WIDTH$} {}\n", "Name", self.label.to_lowercase()); + let mut ret = format!("{:KEY_WIDTH$} {}\n", "Name", self.plugin_name); for (index, param) in self.config_params.iter().enumerate() { let tab_stop = index + 1; @@ -230,7 +242,29 @@ macro_rules! 
            FlbConfigParameter::new($key, $default, $desc),
        )*
        ];
-        let snippet = FlbCompletionSnippet::new($label, read_flb_docs!($doc_path), config_params);
+        let snippet = FlbCompletionSnippet::new($label, None, read_flb_docs!($doc_path), config_params);
        $flb_data.add_snippet($section, snippet);
    };
+
+    (
+        $flb_data:expr,
+        $section:expr,
+        $label:expr,
+        $plugin_name:expr,
+        $doc_path:expr,
+        [
+            $(
+                ($key:expr, $default:expr, $desc:expr)
+            ),*
+            $(,)?
+        ]
+    ) => {
+        let config_params = vec![
+            $(
+                FlbConfigParameter::new($key, $default, $desc),
+            )*
+        ];
+        let snippet = FlbCompletionSnippet::new($label, Some($plugin_name), read_flb_docs!($doc_path), config_params);
+        $flb_data.add_snippet($section, snippet);
+    };
}
@@ -257,12 +291,12 @@ static FLB_DATA: Lazy<FlbData> = Lazy::new(|| {
        ("Interval_NSec", Some("0"), "Polling interval in nanoseconds"),
        ("Dev_Name", None, "Device name to limit the target. (e.g. sda). If not set, in_disk gathers information from all of disks and partitions."),
    ]);
-    add_snippet!(data, FlbSectionType::Input, "Docker Metrics", "input/docker-metrics", [
+    add_snippet!(data, FlbSectionType::Input, "Docker Metrics", "docker", "input/docker-metrics", [
        ("Interval_Sec", Some("1"), "Polling interval in seconds"),
        ("Include", None, "A space-separated list of containers to include"),
        ("Exclude", None, "A space-separated list of containers to exclude"),
    ]);
-    add_snippet!(data, FlbSectionType::Input, "Docker Events", "input/docker-events", [
+    add_snippet!(data, FlbSectionType::Input, "Docker Events", "docker_events", "input/docker-events", [
        ("Unix_Path", Some("/var/run/docker.sock"), "The docker socket unix path"),
        ("Buffer_Size", Some("8192"), "The size of the buffer used to read docker events (in bytes)"),
        ("Parser", None, "Specify the name of a parser to interpret the entry as a structured message."),
@@ -300,7 +334,7 @@ static FLB_DATA: Lazy<FlbData> = Lazy::new(|| {
        ("Exit_After_Oneshot", Some("false"), "Exit as soon as the one-shot command exits. This allows the exec plugin to be used as a wrapper for another command, sending the target command's output to any fluent-bit sink(s) then exiting."),
        ("Propagate_Exit_Code", Some("false"), "When exiting due to Exit_After_Oneshot, cause fluent-bit to exit with the exit code of the command exited by this plugin. Follows [shell conventions for exit code propagation](https://www.gnu.org/software/bash/manual/html_node/Exit-Status.html)."),
    ]);
-    add_snippet!(data, FlbSectionType::Input, "Exec Wasi", "input/exec-wasi", [
+    add_snippet!(data, FlbSectionType::Input, "Exec Wasi", "wasi", "input/exec-wasi", [
        ("WASI_Path", None, "The place of a WASM program file."),
        ("Parser", None, "Specify the name of a parser to interpret the entry as a structured message."),
        ("Accessible_Paths", None, "Specify the whitelist of paths to be able to access paths from WASM programs."),
        ("Buf_Size", None, "Size of the buffer (check [unit sizes](https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/unit-sizes) for allowed values)"),
        ("Oneshot", Some("false"), "Only run once at startup. This allows collection of data precedent to fluent-bit's startup"),
    ]);
-    add_snippet!(data, FlbSectionType::Input, "Fluent Bit Metrics", "input/fluentbit-metrics", [
+    add_snippet!(data, FlbSectionType::Input, "Fluent Bit Metrics", "fluentbit_metrics", "input/fluentbit-metrics", [
        ("scrape_interval", Some("2"), "The rate at which metrics are collected from the host operating system"),
        ("scrape_on_start", Some("false"), "Scrape metrics upon start, useful to avoid waiting for `scrape_interval` for the first round of metrics."),
@@ -363,7 +397,7 @@ static FLB_DATA: Lazy<FlbData> = Lazy::new(|| {
        ("poll_ms", Some("500"), "Kafka brokers polling interval in milliseconds."),
        ("Buffer_Max_Size", Some("4M"), "Specify the maximum size of buffer per cycle to poll kafka messages from subscribed topics. To increase throughput, specify larger size."),
    ]);
-    add_snippet!(data, FlbSectionType::Input, "Kernel Logs", "input/kernel-logs", [
+    add_snippet!(data, FlbSectionType::Input, "Kernel Logs", "kmsg", "input/kernel-logs", [
        ("Prio_Level", Some("8"), "The log level to filter. The kernel log is dropped if its priority is more than prio_level. Allowed values are 0-8. Default is 8. 8 means all logs are saved."),
    ]);
    add_snippet!(data, FlbSectionType::Input, "Kubernetes Events", "input/kubernetes-events", [
@@ -390,6 +424,19 @@ static FLB_DATA: Lazy<FlbData> = Lazy::new(|| {
        ("Port", Some("1883"), "TCP port where listening for connections"),
        ("Payload_Key", None, "Specify the key where the payload key/value will be preserved"),
    ]);
+    add_snippet!(data, FlbSectionType::Input, "Network I/O Metrics", "netif", "input/network-io-metrics", [
+        ("Interface", None, "Specify the network interface to monitor. e.g. eth0"),
+        ("Interval_Sec", Some("1"), "Polling interval (seconds)."),
+        ("Interval_NSec", Some("0"), "Polling interval (nanoseconds)."),
+        ("Verbose", Some("false"), "If true, gather metrics precisely."),
+        ("Test_At_Init", Some("false"), "If true, test whether the network interface is valid at initialization."),
+    ]);
+    add_snippet!(data, FlbSectionType::Input, "NGINX Exporter Metrics", "nginx_metrics", "input/nginx", [
+        ("Host", Some("localhost"), "Name of the target host or IP address to check."),
+        ("Port", Some("80"), "Port of the target nginx service to connect to."),
+        ("Status_URL", Some("/status"), "The URL of the Stub Status Handler."),
+        ("Nginx_Plus", Some("true"), "Turn on NGINX Plus mode."),
+    ]);

    //////////////////////////////////////////////////////////////////////////////////////////
    // Output
diff --git a/ide-completion/Cargo.toml b/ide-completion/Cargo.toml
new file mode 100644
index 0000000..c512786
--- /dev/null
+++ b/ide-completion/Cargo.toml
@@ -0,0 +1,8 @@
+[package]
+name = "ide-completion"
+version = "0.1.0"
+rust-version.workspace = true
+edition.workspace = true
+license.workspace = true
+
+[dependencies]
diff --git a/ide-completion/src/lib.rs b/ide-completion/src/lib.rs
new file mode 100644
index 0000000..b93cf3f
--- /dev/null
+++ b/ide-completion/src/lib.rs
@@ -0,0 +1,14 @@
+pub fn add(left: u64, right: u64) -> u64 {
+    left + right
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn it_works() {
+        let result = add(2, 2);
+        assert_eq!(result, 4);
+    }
+}
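
A note for reviewers on the new `plugin_name` fallback: the sketch below is a hypothetical standalone distillation (the `derived_name` helper does not exist in the codebase; the real logic lives inline in `FlbCompletionSnippet::new`), but the `convert_case` calls mirror the diff. It shows why the optional explicit name exists: some labels snake-case directly to the Fluent Bit plugin name, while others (e.g. "Network I/O Metrics" vs. `netif`) do not and must be overridden.

```rust
// Hypothetical distillation of the plugin-name fallback added in
// FlbCompletionSnippet::new (not part of the diff itself).
use convert_case::{Case, Casing};

fn derived_name(label: &str, plugin_name: Option<&str>) -> String {
    // An explicit plugin name wins; otherwise the label is snake-cased.
    plugin_name.map_or_else(|| label.to_case(Case::Snake), |s| s.to_string())
}

fn main() {
    // "Kubernetes Events" snake-cases directly to the real plugin name,
    // so its add_snippet! call needs no override:
    assert_eq!(derived_name("Kubernetes Events", None), "kubernetes_events");
    // "Network I/O Metrics" would not snake-case to "netif", so the diff
    // passes an explicit override:
    assert_eq!(derived_name("Network I/O Metrics", Some("netif")), "netif");
}
```

The generated `Name` line in the snippet now uses this derived name instead of the old `self.label.to_lowercase()`, which would have produced invalid entries such as `Name network i/o metrics`.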